diff --git a/.cargo/config.toml b/.cargo/config.toml new file mode 100644 index 00000000..aa3cf6c8 --- /dev/null +++ b/.cargo/config.toml @@ -0,0 +1,3 @@ +[env] +CMAKE = "/opt/homebrew/bin/cmake" +PATH = { value = "/opt/homebrew/bin:${PATH}", force = false } diff --git a/.chglog/CHANGELOG.tpl.md b/.chglog/CHANGELOG.tpl.md deleted file mode 100755 index 953e1957..00000000 --- a/.chglog/CHANGELOG.tpl.md +++ /dev/null @@ -1,30 +0,0 @@ -{{ range .Versions }} - -## {{ if .Tag.Previous }}[{{ .Tag.Name }}]({{ $.Info.RepositoryURL }}/compare/{{ .Tag.Previous.Name }}...{{ .Tag.Name }}){{ else }}{{ .Tag.Name }}{{ end }} ({{ datetime "2006-01-02" .Tag.Date }}) - -{{ range .CommitGroups -}} -### {{ .Title }} - -{{ range .Commits -}} -* {{ if .Scope }}**{{ .Scope }}:** {{ end }}{{ .Subject }} - _by {{ .Author.Name }}_ -{{ end }} -{{ end -}} - -{{- if .RevertCommits -}} -### Reverts - -{{ range .RevertCommits -}} -* {{ .Revert.Header }} - _by {{ .Revert.Author.Name }}_ -{{ end }} -{{ end -}} - -{{- if .NoteGroups -}} -{{ range .NoteGroups -}} -### {{ .Title }} - -{{ range .Notes }} -{{ .Body }} -{{ end }} -{{ end -}} -{{ end -}} -{{ end -}} \ No newline at end of file diff --git a/.chglog/config.yml b/.chglog/config.yml deleted file mode 100755 index 9c4cc408..00000000 --- a/.chglog/config.yml +++ /dev/null @@ -1,36 +0,0 @@ -style: github -template: CHANGELOG.tpl.md -info: - title: CHANGELOG - repository_url: https://github.com/FunctionStream/function-stream -options: - commit_groups: - title_maps: - feat: Features - fix: Bug Fixes - perf: Performance Improvements - refactor: Code Refactoring - docs: Documentation - style: Styles - test: Tests - chore: Miscellaneous - sort_by: Custom - title_order: - - feat - - fix - - perf - - refactor - - docs - - style - - test - - chore - header: - pattern: "^(\\w*)(?:\\(([\\w\\$\\.\\-\\*\\s]*)\\))?\\:\\s(.*)$" - pattern_maps: - - Type - - Scope - - Subject - notes: - keywords: - - BREAKING CHANGE - tag_filter_pattern: '^v\d+\.\d+\.\d+$' 
diff --git a/.codacy.yml b/.codacy.yml new file mode 100644 index 00000000..0df47483 --- /dev/null +++ b/.codacy.yml @@ -0,0 +1,23 @@ +# Codacy configuration file +# Exclude some warnings that are acceptable during refactoring phase + +exclude_paths: + - "target/**" + - "**/generated/**" + - "**/bindings/**" + - "examples/**" + - "README.md" + - "cli/README.md" + # Build scripts use subprocess with controlled build-time inputs only + - "python/functionstream-runtime/build.py" + - "python/functionstream-api/build_package.py" + # Ignore: apt pin versions, exec (required for Python module loading), pass style + - "Dockerfile" + - "python/functionstream-runtime/src/fs_runtime/runner.py" + - "python/functionstream-client/src/fs_client/exceptions.py" + +# Exclude some code quality checks (during refactoring phase) +exclude_patterns: + - ".*unused.*" + - ".*complexity.*" + - ".*duplication.*" \ No newline at end of file diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 00000000..4bd972cf --- /dev/null +++ b/.dockerignore @@ -0,0 +1,19 @@ +target +.git +.github +.gitignore +*.md +.env +.env.* +dist +*.zip +*.tar.gz +.DS_Store +.cursor +examples +*.pyc +__pycache__ +.codacy.yml +docker-compose.yml +Dockerfile +.dockerignore diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml deleted file mode 100644 index 3cc9fc32..00000000 --- a/.github/workflows/ci.yaml +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright 2024 Function Stream Org. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - - -name: CI -on: - pull_request: - branches: - - main - push: - branches: - - main - -jobs: - test: - runs-on: ubuntu-latest - strategy: - matrix: - go-version: [ '1.22' ] - steps: - - uses: actions/checkout@v3 - - uses: actions/setup-go@v3 - with: - go-version: ${{ matrix.go-version }} - - uses: acifani/setup-tinygo@v2 - with: - tinygo-version: '0.31.2' - - name: golangci-lint - uses: golangci/golangci-lint-action@v3 - with: - version: v1.55.2 - args: --timeout=10m - skip-pkg-cache: true - - run: docker compose -f ./tests/docker-compose.yaml up -d - - run: make build-all - - name: Wait for Pulsar service - run: until curl http://localhost:8080/metrics > /dev/null 2>&1 ; do sleep 1; done - - run: make test - - name: Collect Docker Compose logs - if: failure() - run: docker compose -f ./tests/docker-compose.yaml logs || true - - name: Collect nohup logs - if: failure() - run: cat nohup.out || true - - test-python-sdk: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - uses: actions/setup-python@v4 - with: - python-version: '3.11' - - name: Install dependencies - working-directory: sdks/fs-python - run: | - pip install . 
- python -m pip install --upgrade pip - pip install -r requirements.txt - pip install pytest - - name: Run Python SDK tests - working-directory: sdks/fs-python - run: | - make test - - test-operator: - runs-on: ubuntu-latest - strategy: - matrix: - go-version: [ '1.24' ] - steps: - - uses: actions/checkout@v3 - - uses: actions/setup-go@v3 - with: - go-version: ${{ matrix.go-version }} - - name: Test Operator - working-directory: operator - run: | - go mod tidy - make test diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 00000000..7ed3949d --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,63 @@ +name: CI + +on: + push: + branches: [main, master] + pull_request: + branches: [main, master] + +env: + CARGO_TERM_COLOR: always + +jobs: + build-and-test: + name: Linux build and test + runs-on: ubuntu-latest + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Install Rust + uses: dtolnay/rust-toolchain@stable + with: + components: rustfmt, clippy + + - name: Install build dependencies + run: | + sudo apt-get update + sudo apt-get install -y \ + build-essential \ + cmake \ + libssl-dev \ + pkg-config \ + libsasl2-dev \ + protobuf-compiler + # Verify cmake is available + cmake --version + which cmake + + - name: Cache Cargo + uses: Swatinem/rust-cache@v2 + with: + shared-key: "linux-build" + + - name: Set CMAKE environment variable + run: | + echo "CMAKE=$(which cmake)" >> $GITHUB_ENV + echo "CMAKE set to: $(which cmake)" + + - name: Generate protocol files + run: cargo check --package protocol + + - name: Format generated code + run: cargo fmt --all + + - name: Check format + run: cargo fmt --all -- --check + + - name: Build + run: cargo build --all-targets --workspace + + - name: Test + run: cargo test --workspace diff --git a/.gitignore b/.gitignore index dc933403..b57cc828 100644 --- a/.gitignore +++ b/.gitignore @@ -1,116 +1,29 @@ -# Logs -logs -*.log -npm-debug.log* -yarn-debug.log* -yarn-error.log* 
-lerna-debug.log* - -# Diagnostic reports (https://nodejs.org/api/report.html) -report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json - -# Runtime data -pids -*.pid -*.seed -*.pid.lock - -# Directory for instrumented libs generated by jscoverage/JSCover -lib-cov - -# Coverage directory used by tools like istanbul -coverage -*.lcov - -# nyc test coverage -.nyc_output - -# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) -.grunt - -# Bower dependency directory (https://bower.io/) -bower_components - -# node-waf configuration -.lock-wscript - -# Compiled binary addons (https://nodejs.org/api/addons.html) -build/Release - -# Dependency directories -node_modules/ -jspm_packages/ - -# TypeScript v1 declaration files -typings/ - -# TypeScript cache -*.tsbuildinfo - -# Optional npm cache directory -.npm - -# Optional eslint cache -.eslintcache - -# Microbundle cache -.rpt2_cache/ -.rts2_cache_cjs/ -.rts2_cache_es/ -.rts2_cache_umd/ - -# Optional REPL history -.node_repl_history - -# Output of 'npm pack' -*.tgz - -# Yarn Integrity file -.yarn-integrity - -# dotenv environment variables file -.env -.env.test - -# parcel-bundler cache (https://parceljs.org/) -.cache - -# Next.js build output -.next - -# Nuxt.js build / generate output -.nuxt -dist - -# Gatsby files +/data/** +**/target/** +.idea/ +**/.vscode/ +**/.venv/** +**/dist/** +logs/** + +**.wasm + +protocol/generated/** +function-stream/** +distribution/ + +examples/go-processor/bindings/** +examples/go-processor/build/** +examples/python-processor/build/** +examples/python-processor/dependencies/** +python/functionstream-client/src/fs_client/_proto/ +python/functionstream-api/build + + +# python Runtime - Build artifacts and intermediate files +python/**/bindings/ +python/**/dependencies/ +python/**/target/ +**/**.egg-info .cache/ -# Comment in the public line in if your project uses Gatsby and *not* Next.js -# https://nextjs.org/blog/next-9-1#public-directory-support -# public - -# vuepress 
build output -.vuepress/dist - -# Serverless directories -.serverless/ - -# FuseBox cache -.fusebox/ - -# DynamoDB Local files -.dynamodb/ - -# TernJS port file -.tern-port - -.idea -.run -bin/ -.DS_Store - -benchmark/*.pprof - -operator/vendor/ - -._* -**/.DS_Store +/.cursor/worktrees.json diff --git a/.golangci.yml b/.golangci.yml deleted file mode 100644 index f91de055..00000000 --- a/.golangci.yml +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright 2024 Function Stream Org. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -run: - deadline: 5m - allow-parallel-runners: true - -issues: - # don't skip warning about doc comments - # don't exclude the default set of lint - exclude-use-default: false - # restore some of the defaults - # (fill in the rest as needed) - exclude-rules: - - path: "api/*" - linters: - - lll - - path: "internal/*" - linters: - - dupl - - lll -linters: - disable-all: true - enable: - - dupl - - errcheck - - exportloopref - - goconst - - gocyclo - - gofmt - - goimports - - gosimple - - govet - - ineffassign - - lll - - misspell - - nakedret - - prealloc - - staticcheck - - typecheck - - unconvert - - unparam - - unused diff --git a/CHANGELOG.md b/CHANGELOG.md deleted file mode 100644 index 35f6a523..00000000 --- a/CHANGELOG.md +++ /dev/null @@ -1,117 +0,0 @@ - - -## [v0.5.0](https://github.com/FunctionStream/function-stream/compare/v0.4.0...v0.5.0) (2024-06-13) - -### Features - -* add data schema support ([#182](https://github.com/FunctionStream/function-stream/issues/182)) - _by Zike Yang_ -* improve log ([#181](https://github.com/FunctionStream/function-stream/issues/181)) - _by Zike Yang_ -* improve tube configuration ([#180](https://github.com/FunctionStream/function-stream/issues/180)) - _by Zike Yang_ -* support tls ([#179](https://github.com/FunctionStream/function-stream/issues/179)) - _by Zike Yang_ -* function store ([#178](https://github.com/FunctionStream/function-stream/issues/178)) - _by Zike Yang_ - -### Documentation - -* add changelog for v0.4.0 ([#177](https://github.com/FunctionStream/function-stream/issues/177)) - _by Zike Yang_ - - - -## [v0.4.0](https://github.com/FunctionStream/function-stream/compare/v0.3.0...v0.4.0) (2024-05-09) - -### Features - -* add contube config validation ([#174](https://github.com/FunctionStream/function-stream/issues/174)) - _by Zike Yang_ -* support pluggable state store ([#173](https://github.com/FunctionStream/function-stream/issues/173)) - _by Zike Yang_ -* improve function configuration 
([#170](https://github.com/FunctionStream/function-stream/issues/170)) - _by Zike Yang_ -* improve configuration ([#169](https://github.com/FunctionStream/function-stream/issues/169)) - _by Zike Yang_ -* refactor fs http service ([#168](https://github.com/FunctionStream/function-stream/issues/168)) - _by Zike Yang_ - -### Build - -* add change log ([#160](https://github.com/FunctionStream/function-stream/issues/160)) - _by Zike Yang_ -* **deps:** bump google.golang.org/protobuf from 1.32.0 to 1.33.0 ([#162](https://github.com/FunctionStream/function-stream/issues/162)) - _by dependabot[bot]_ - -### Bug Fixes - -* prevent panic by closing channel in NewSourceTube goroutine ([#156](https://github.com/FunctionStream/function-stream/issues/156)) - _by wy-os_ -* **tube:** move the getOrCreatChan outside of the goroutine ([#161](https://github.com/FunctionStream/function-stream/issues/161)) - _by wy-os_ - -### Tests - -* fix duplicated close on the server ([#163](https://github.com/FunctionStream/function-stream/issues/163)) - _by Zike Yang_ - - - -## [v0.3.0](https://github.com/FunctionStream/function-stream/compare/v0.2.0...v0.3.0) (2024-03-13) - -### Features - -* state store ([#153](https://github.com/FunctionStream/function-stream/issues/153)) - _by Zike Yang_ -* add http source tube ([#149](https://github.com/FunctionStream/function-stream/issues/149)) - _by Zike Yang_ -* add sink, source and runtime config to function config ([#136](https://github.com/FunctionStream/function-stream/issues/136)) - _by Zike Yang_ -* add grpc runtime ([#135](https://github.com/FunctionStream/function-stream/issues/135)) - _by Zike Yang_ - -### Tests - -* add tests for chan utils ([#140](https://github.com/FunctionStream/function-stream/issues/140)) - _by wy-os_ -* fix flaky test for grpc_func ([#142](https://github.com/FunctionStream/function-stream/issues/142)) - _by Zike Yang_ - -### Bug Fixes - -* fix deadlock issue in grpc_func and add cors support 
([#158](https://github.com/FunctionStream/function-stream/issues/158)) - _by Zike Yang_ -* cli doesn't respect replica when creating function ([#145](https://github.com/FunctionStream/function-stream/issues/145)) - _by Zike Yang_ -* fix race condition issues in function manager ([#141](https://github.com/FunctionStream/function-stream/issues/141)) - _by Zike Yang_ -* fix context value setting for the function instance ([#139](https://github.com/FunctionStream/function-stream/issues/139)) - _by wy-os_ - -### Code Refactoring - -* improve grpc function protocol ([#147](https://github.com/FunctionStream/function-stream/issues/147)) - _by Zike Yang_ -* improve logging ([#146](https://github.com/FunctionStream/function-stream/issues/146)) - _by Zike Yang_ - -### Miscellaneous - -* rename module ([#137](https://github.com/FunctionStream/function-stream/issues/137)) - _by Zike Yang_ - - - -## [v0.2.0](https://github.com/FunctionStream/function-stream/compare/v0.1.0...v0.2.0) (2024-02-17) - -### Features - -* add directory structure to readme and improve structure ([#132](https://github.com/FunctionStream/function-stream/issues/132)) - _by Zike Yang_ -* support basic function operations using CLI tool ([#128](https://github.com/FunctionStream/function-stream/issues/128)) - _by Zike Yang_ -* support pluggable queue ([#125](https://github.com/FunctionStream/function-stream/issues/125)) - _by Zike Yang_ -* support delete function ([#3](https://github.com/FunctionStream/function-stream/issues/3)) - _by Zike Yang_ -* add integration test and CI ([#1](https://github.com/FunctionStream/function-stream/issues/1)) - _by Zike Yang_ -* support loading wasm file - _by Zike Yang_ - -### License - -* update license header ([#130](https://github.com/FunctionStream/function-stream/issues/130)) - _by Zike Yang_ - -### Build - -* add license checker ([#7](https://github.com/FunctionStream/function-stream/issues/7)) - _by Zike Yang_ - -### Bug Fixes - -* fix mem queue bench doesn't show 
result ([#129](https://github.com/FunctionStream/function-stream/issues/129)) - _by Zike Yang_ - -### Performance Improvements - -* improve performance ([#124](https://github.com/FunctionStream/function-stream/issues/124)) - _by Zike Yang_ -* add bench perf ([#6](https://github.com/FunctionStream/function-stream/issues/6)) - _by Zike Yang_ - -### Code Refactoring - -* use tube term instead of queue ([#134](https://github.com/FunctionStream/function-stream/issues/134)) - _by Zike Yang_ -* abstract contube-go impl ([#131](https://github.com/FunctionStream/function-stream/issues/131)) - _by Zike Yang_ - -### Documentation - -* fix readme format ([#133](https://github.com/FunctionStream/function-stream/issues/133)) - _by Zike Yang_ - - - -## v0.1.0 (2021-06-28) - diff --git a/Cargo.lock b/Cargo.lock new file mode 100644 index 00000000..98bc0ddd --- /dev/null +++ b/Cargo.lock @@ -0,0 +1,4601 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 4 + +[[package]] +name = "addr2line" +version = "0.24.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" +dependencies = [ + "gimli", +] + +[[package]] +name = "ahash" +version = "0.8.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75" +dependencies = [ + "cfg-if", + "const-random", + "getrandom 0.3.4", + "once_cell", + "version_check", + "zerocopy", +] + +[[package]] +name = "aho-corasick" +version = "1.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddd31a130427c27518df266943a5308ed92d4b226cc639f5a8f1002816174301" +dependencies = [ + "memchr", +] + +[[package]] +name = "allocator-api2" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" + 
+[[package]] +name = "ambient-authority" +version = "0.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9d4ee0d472d1cd2e28c97dfa124b3d8d992e10eb0a035f33f5d12e3a177ba3b" + +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + +[[package]] +name = "anstream" +version = "0.6.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43d5b281e737544384e969a5ccad3f1cdd24b48086a0fc1b2a5262a26b8f4f4a" +dependencies = [ + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "is_terminal_polyfill", + "utf8parse", +] + +[[package]] +name = "anstyle" +version = "1.0.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5192cca8006f1fd4f7237516f40fa183bb07f8fbdfedaa0036de5ea9b0b45e78" + +[[package]] +name = "anstyle-parse" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e7644824f0aa2c7b9384579234ef10eb7efb6a0deb83f9630a49594dd9c15c2" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" +version = "1.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "40c48f72fd53cd289104fc64099abca73db4166ad86ea0b4341abe65af83dadc" +dependencies = [ + "windows-sys 0.61.2", +] + +[[package]] +name = "anstyle-wincon" +version = "3.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "291e6a250ff86cd4a820112fb8898808a366d8f9f58ce16d1f538353ad55747d" +dependencies = [ + "anstyle", + "once_cell_polyfill", + "windows-sys 0.61.2", +] + +[[package]] +name = "anyhow" +version = "1.0.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61" + +[[package]] +name = 
"ar_archive_writer" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0c269894b6fe5e9d7ada0cf69b5bf847ff35bc25fc271f08e1d080fce80339a" +dependencies = [ + "object 0.32.2", +] + +[[package]] +name = "arbitrary" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3d036a3c4ab069c7b410a2ce876bd74808d2d0888a82667669f8e783a898bf1" + +[[package]] +name = "arrow-array" +version = "52.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16f4a9468c882dc66862cef4e1fd8423d47e67972377d85d80e022786427768c" +dependencies = [ + "ahash", + "arrow-buffer", + "arrow-data", + "arrow-schema", + "chrono", + "half", + "hashbrown 0.14.5", + "num", +] + +[[package]] +name = "arrow-buffer" +version = "52.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c975484888fc95ec4a632cdc98be39c085b1bb518531b0c80c5d462063e5daa1" +dependencies = [ + "bytes", + "half", + "num", +] + +[[package]] +name = "arrow-cast" +version = "52.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da26719e76b81d8bc3faad1d4dbdc1bcc10d14704e63dc17fc9f3e7e1e567c8e" +dependencies = [ + "arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-schema", + "arrow-select", + "atoi", + "base64 0.22.1", + "chrono", + "half", + "lexical-core", + "num", + "ryu", +] + +[[package]] +name = "arrow-data" +version = "52.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd9d6f18c65ef7a2573ab498c374d8ae364b4a4edf67105357491c031f716ca5" +dependencies = [ + "arrow-buffer", + "arrow-schema", + "half", + "num", +] + +[[package]] +name = "arrow-ipc" +version = "52.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e786e1cdd952205d9a8afc69397b317cfbb6e0095e445c69cda7e8da5c1eeb0f" +dependencies = [ + "arrow-array", + "arrow-buffer", + "arrow-cast", + "arrow-data", + "arrow-schema", + 
"flatbuffers", +] + +[[package]] +name = "arrow-schema" +version = "52.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e972cd1ff4a4ccd22f86d3e53e835c2ed92e0eea6a3e8eadb72b4f1ac802cf8" + +[[package]] +name = "arrow-select" +version = "52.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "600bae05d43483d216fb3494f8c32fdbefd8aa4e1de237e790dbb3d9f44690a3" +dependencies = [ + "ahash", + "arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-schema", + "num", +] + +[[package]] +name = "async-stream" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" +dependencies = [ + "async-stream-impl", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-stream-impl" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.113", +] + +[[package]] +name = "async-trait" +version = "0.1.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.113", +] + +[[package]] +name = "atoi" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f28d99ec8bfea296261ca1af174f24225171fea9664ba9003cbebee704810528" +dependencies = [ + "num-traits", +] + +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + +[[package]] +name = "autocfg" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" + 
+[[package]] +name = "axum" +version = "0.7.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edca88bc138befd0323b20752846e6587272d3b03b0343c8ea28a6f819e6e71f" +dependencies = [ + "async-trait", + "axum-core", + "bytes", + "futures-util", + "http", + "http-body", + "http-body-util", + "itoa", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "rustversion", + "serde", + "sync_wrapper", + "tower 0.5.2", + "tower-layer", + "tower-service", +] + +[[package]] +name = "axum-core" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http", + "http-body", + "http-body-util", + "mime", + "pin-project-lite", + "rustversion", + "sync_wrapper", + "tower-layer", + "tower-service", +] + +[[package]] +name = "base64" +version = "0.21.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" + +[[package]] +name = "base64" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" + +[[package]] +name = "bincode" +version = "1.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" +dependencies = [ + "serde", +] + +[[package]] +name = "bindgen" +version = "0.65.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfdf7b466f9a4903edc73f95d6d2bcd5baf8ae620638762244d3f60143643cc5" +dependencies = [ + "bitflags 1.3.2", + "cexpr", + "clang-sys", + "lazy_static", + "lazycell", + "peeking_take_while", + "prettyplease", + "proc-macro2", + "quote", + "regex", + "rustc-hash 1.1.0", + "shlex", + "syn 2.0.113", +] + +[[package]] +name = 
"bindgen" +version = "0.72.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "993776b509cfb49c750f11b8f07a46fa23e0a1386ffc01fb1e7d343efc387895" +dependencies = [ + "bitflags 2.10.0", + "cexpr", + "clang-sys", + "itertools 0.13.0", + "proc-macro2", + "quote", + "regex", + "rustc-hash 2.1.1", + "shlex", + "syn 2.0.113", +] + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "bitflags" +version = "2.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3" + +[[package]] +name = "block-buffer" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +dependencies = [ + "generic-array", +] + +[[package]] +name = "bumpalo" +version = "3.19.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dd9dc738b7a8311c7ade152424974d8115f2cdad61e8dab8dac9f2362298510" +dependencies = [ + "allocator-api2", +] + +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + +[[package]] +name = "bytes" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b35204fbdc0b3f4446b89fc1ac2cf84a8a68971995d0bf2e925ec7cd960f9cb3" + +[[package]] +name = "bzip2-sys" +version = "0.1.13+1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "225bff33b2141874fe80d71e07d6eec4f85c5c216453dd96388240f96e1acc14" +dependencies = [ + "cc", + "pkg-config", +] + +[[package]] +name = "cap-fs-ext" +version = "3.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "d5528f85b1e134ae811704e41ef80930f56e795923f866813255bc342cc20654" +dependencies = [ + "cap-primitives", + "cap-std", + "io-lifetimes", + "windows-sys 0.59.0", +] + +[[package]] +name = "cap-net-ext" +version = "3.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20a158160765c6a7d0d8c072a53d772e4cb243f38b04bfcf6b4939cfbe7482e7" +dependencies = [ + "cap-primitives", + "cap-std", + "rustix 1.1.3", + "smallvec", +] + +[[package]] +name = "cap-primitives" +version = "3.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6cf3aea8a5081171859ef57bc1606b1df6999df4f1110f8eef68b30098d1d3a" +dependencies = [ + "ambient-authority", + "fs-set-times", + "io-extras", + "io-lifetimes", + "ipnet", + "maybe-owned", + "rustix 1.1.3", + "rustix-linux-procfs", + "windows-sys 0.59.0", + "winx", +] + +[[package]] +name = "cap-rand" +version = "3.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8144c22e24bbcf26ade86cb6501a0916c46b7e4787abdb0045a467eb1645a1d" +dependencies = [ + "ambient-authority", + "rand", +] + +[[package]] +name = "cap-std" +version = "3.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6dc3090992a735d23219de5c204927163d922f42f575a0189b005c62d37549a" +dependencies = [ + "cap-primitives", + "io-extras", + "io-lifetimes", + "rustix 1.1.3", +] + +[[package]] +name = "cap-time-ext" +version = "3.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "def102506ce40c11710a9b16e614af0cde8e76ae51b1f48c04b8d79f4b671a80" +dependencies = [ + "ambient-authority", + "cap-primitives", + "iana-time-zone", + "once_cell", + "rustix 1.1.3", + "winx", +] + +[[package]] +name = "cc" +version = "1.2.51" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a0aeaff4ff1a90589618835a598e545176939b97874f7abc7851caa0618f203" +dependencies = [ + "find-msvc-tools", + "jobserver", + "libc", + 
"shlex", +] + +[[package]] +name = "cexpr" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" +dependencies = [ + "nom", +] + +[[package]] +name = "cfg-if" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" + +[[package]] +name = "cfg_aliases" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" + +[[package]] +name = "chrono" +version = "0.4.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "145052bdd345b87320e369255277e3fb5152762ad123a901ef5c262dd38fe8d2" +dependencies = [ + "iana-time-zone", + "num-traits", + "windows-link", +] + +[[package]] +name = "clang-sys" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" +dependencies = [ + "glob", + "libc", + "libloading", +] + +[[package]] +name = "clap" +version = "4.5.54" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6e6ff9dcd79cff5cd969a17a545d79e84ab086e444102a591e288a8aa3ce394" +dependencies = [ + "clap_builder", + "clap_derive", +] + +[[package]] +name = "clap_builder" +version = "4.5.54" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa42cf4d2b7a41bc8f663a7cab4031ebafa1bf3875705bfaf8466dc60ab52c00" +dependencies = [ + "anstream", + "anstyle", + "clap_lex", + "strsim", +] + +[[package]] +name = "clap_derive" +version = "4.5.49" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a0b5487afeab2deb2ff4e03a807ad1a03ac532ff5a2cee5d86884440c7f7671" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn 2.0.113", +] + +[[package]] +name = 
"clap_lex" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d" + +[[package]] +name = "clipboard-win" +version = "5.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bde03770d3df201d4fb868f2c9c59e66a3e4e2bd06692a0fe701e7103c7e84d4" +dependencies = [ + "error-code", +] + +[[package]] +name = "cmake" +version = "0.1.57" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75443c44cd6b379beb8c5b45d85d0773baf31cce901fe7bb252f4eff3008ef7d" +dependencies = [ + "cc", +] + +[[package]] +name = "cobs" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fa961b519f0b462e3a3b4a34b64d119eeaca1d59af726fe450bbba07a9fc0a1" +dependencies = [ + "thiserror 2.0.17", +] + +[[package]] +name = "colorchoice" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" + +[[package]] +name = "comfy-table" +version = "7.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "958c5d6ecf1f214b4c2bbbbf6ab9523a864bd136dcf71a7e8904799acfe1ad47" +dependencies = [ + "crossterm", + "unicode-segmentation", + "unicode-width 0.2.2", +] + +[[package]] +name = "const-random" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87e00182fe74b066627d63b85fd550ac2998d4b0bd86bfed477a0ae4c7c71359" +dependencies = [ + "const-random-macro", +] + +[[package]] +name = "const-random-macro" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9d839f2a20b0aee515dc581a6172f2321f96cab76c1a38a4c584a194955390e" +dependencies = [ + "getrandom 0.2.16", + "once_cell", + "tiny-keccak", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.7" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" + +[[package]] +name = "cpp_demangle" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2bb79cb74d735044c972aae58ed0aaa9a837e85b01106a54c39e42e97f62253" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "cpufeatures" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" +dependencies = [ + "libc", +] + +[[package]] +name = "cranelift-bforest" +version = "0.115.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88c1d02b72b6c411c0a2e92b25ed791ad5d071184193c08a34aa0fdcdf000b72" +dependencies = [ + "cranelift-entity", +] + +[[package]] +name = "cranelift-bitset" +version = "0.115.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "720b93bd86ebbb23ebfb2db1ed44d54b2ecbdbb2d034d485bc64aa605ee787ab" +dependencies = [ + "serde", + "serde_derive", +] + +[[package]] +name = "cranelift-codegen" +version = "0.115.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aed3d2d9914d30b460eedd7fd507720203023997bef71452ce84873f9c93537c" +dependencies = [ + "bumpalo", + "cranelift-bforest", + "cranelift-bitset", + "cranelift-codegen-meta", + "cranelift-codegen-shared", + "cranelift-control", + "cranelift-entity", + "cranelift-isle", + "gimli", + "hashbrown 0.14.5", + "log", + "postcard", + "regalloc2", + "rustc-hash 2.1.1", + "serde", + "serde_derive", + "sha2", + "smallvec", + "target-lexicon", +] + +[[package]] +name = "cranelift-codegen-meta" +version = "0.115.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "888c188d32263ec9e048873ff0b68c700933600d553f4412417916828be25f8e" +dependencies = [ + "cranelift-codegen-shared", +] + +[[package]] +name = 
"cranelift-codegen-shared" +version = "0.115.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ddd5f4114d04ce7e073dd74e2ad16541fc61970726fcc8b2d5644a154ee4127" + +[[package]] +name = "cranelift-control" +version = "0.115.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92cc4c98d6a4256a1600d93ccd3536f3e77da9b4ca2c279de786ac22876e67d6" +dependencies = [ + "arbitrary", +] + +[[package]] +name = "cranelift-entity" +version = "0.115.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "760af4b5e051b5f82097a27274b917e3751736369fa73660513488248d27f23d" +dependencies = [ + "cranelift-bitset", + "serde", + "serde_derive", +] + +[[package]] +name = "cranelift-frontend" +version = "0.115.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0bf77ec0f470621655ec7539860b5c620d4f91326654ab21b075b83900f8831" +dependencies = [ + "cranelift-codegen", + "log", + "smallvec", + "target-lexicon", +] + +[[package]] +name = "cranelift-isle" +version = "0.115.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b665d0a6932c421620be184f9fc7f7adaf1b0bc2fa77bb7ac5177c49abf645b" + +[[package]] +name = "cranelift-native" +version = "0.115.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb2e75d1bd43dfec10924798f15e6474f1dbf63b0024506551aa19394dbe72ab" +dependencies = [ + "cranelift-codegen", + "libc", + "target-lexicon", +] + +[[package]] +name = "crc32fast" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "crossbeam-channel" +version = "0.5.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2" +dependencies = [ + "crossbeam-utils", +] + +[[package]] 
+name = "crossbeam-deque" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" +dependencies = [ + "crossbeam-epoch", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" + +[[package]] +name = "crossterm" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8b9f2e4c67f833b660cdb0a3523065869fb35570177239812ed4c905aeff87b" +dependencies = [ + "bitflags 2.10.0", + "crossterm_winapi", + "document-features", + "parking_lot", + "rustix 1.1.3", + "winapi", +] + +[[package]] +name = "crossterm_winapi" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "acdd7c62a3665c7f6830a51635d9ac9b23ed385797f70a83bb8bafe9c572ab2b" +dependencies = [ + "winapi", +] + +[[package]] +name = "crunchy" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5" + +[[package]] +name = "crypto-common" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78c8292055d1c1df0cce5d180393dc8cce0abec0a7102adb6c7b1eef6016d60a" +dependencies = [ + "generic-array", + "typenum", +] + +[[package]] +name = "debugid" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef552e6f588e446098f6ba40d89ac146c8c7b64aade83c051ee00bb5d2bc18d" +dependencies = [ + "uuid", +] + +[[package]] +name = "deranged" 
+version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ececcb659e7ba858fb4f10388c250a7252eb0a27373f1a72b8748afdd248e587" +dependencies = [ + "powerfmt", +] + +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer", + "crypto-common", +] + +[[package]] +name = "directories-next" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "339ee130d97a610ea5a5872d2bbb130fdf68884ff09d3028b81bec8a1ac23bbc" +dependencies = [ + "cfg-if", + "dirs-sys-next", +] + +[[package]] +name = "dirs" +version = "4.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca3aa72a6f96ea37bbc5aa912f6788242832f75369bdfdadcb0e38423f100059" +dependencies = [ + "dirs-sys", +] + +[[package]] +name = "dirs-sys" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b1d1d91c932ef41c0f2663aa8b0ca0342d444d842c06914aa0a7e352d0bada6" +dependencies = [ + "libc", + "redox_users", + "winapi", +] + +[[package]] +name = "dirs-sys-next" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d" +dependencies = [ + "libc", + "redox_users", + "winapi", +] + +[[package]] +name = "displaydoc" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.113", +] + +[[package]] +name = "document-features" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4b8a88685455ed29a21542a33abd9cb6510b6b129abadabdcef0f4c55bc8f61" +dependencies = [ + "litrs", +] + +[[package]] +name = "duct" 
+version = "0.13.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4ab5718d1224b63252cd0c6f74f6480f9ffeb117438a2e0f5cf6d9a4798929c" +dependencies = [ + "libc", + "once_cell", + "os_pipe", + "shared_child", +] + +[[package]] +name = "either" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" + +[[package]] +name = "embedded-io" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef1a6892d9eef45c8fa6b9e0086428a2cca8491aca8f787c534a3d6d0bcb3ced" + +[[package]] +name = "embedded-io" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edd0f118536f44f5ccd48bcb8b111bdc3de888b58c74639dfb034a357d0f206d" + +[[package]] +name = "encoding_rs" +version = "0.8.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "endian-type" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c34f04666d835ff5d62e058c3995147c06f42fe86ff053337632bca83e42702d" + +[[package]] +name = "env_logger" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd405aab171cb85d6735e5c8d9db038c17d3ca007a4d2c25f337935c3d90580" +dependencies = [ + "humantime", + "is-terminal", + "log", + "regex", + "termcolor", +] + +[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + +[[package]] +name = "errno" +version = "0.3.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" +dependencies = [ + "libc", + 
"windows-sys 0.61.2", +] + +[[package]] +name = "error-code" +version = "3.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dea2df4cf52843e0452895c455a1a2cfbb842a1e7329671acf418fdc53ed4c59" + +[[package]] +name = "fallible-iterator" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2acce4a10f12dc2fb14a218589d4f1f62ef011b2d0cc4b3cb1bba8e94da14649" + +[[package]] +name = "fastrand" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" + +[[package]] +name = "fd-lock" +version = "4.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ce92ff622d6dadf7349484f42c93271a0d49b7cc4d466a936405bacbe10aa78" +dependencies = [ + "cfg-if", + "rustix 1.1.3", + "windows-sys 0.59.0", +] + +[[package]] +name = "find-msvc-tools" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "645cbb3a84e60b7531617d5ae4e57f7e27308f6445f5abf653209ea76dec8dff" + +[[package]] +name = "fixedbitset" +version = "0.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d674e81391d1e1ab681a28d99df07927c6d4aa5b027d7da16ba32d1d21ecd99" + +[[package]] +name = "flatbuffers" +version = "24.12.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f1baf0dbf96932ec9a3038d57900329c015b0bfb7b63d904f3bc27e2b02a096" +dependencies = [ + "bitflags 1.3.2", + "rustc_version", +] + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "foldhash" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" + +[[package]] +name = 
"form_urlencoded" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb4cb245038516f5f85277875cdaa4f7d2c9a0fa0468de06ed190163b1581fcf" +dependencies = [ + "percent-encoding", +] + +[[package]] +name = "fs-set-times" +version = "0.20.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94e7099f6313ecacbe1256e8ff9d617b75d1bcb16a6fddef94866d225a01a14a" +dependencies = [ + "io-lifetimes", + "rustix 1.1.3", + "windows-sys 0.59.0", +] + +[[package]] +name = "function-stream" +version = "0.1.0" +dependencies = [ + "anyhow", + "arrow-array", + "arrow-ipc", + "arrow-schema", + "async-trait", + "base64 0.22.1", + "bincode", + "clap", + "crossbeam-channel", + "log", + "lru", + "num_cpus", + "parking_lot", + "pest", + "pest_derive", + "proctitle", + "protocol", + "rdkafka", + "rocksdb", + "serde", + "serde_json", + "serde_yaml", + "thiserror 2.0.17", + "tokio", + "tokio-stream", + "tonic", + "tracing", + "tracing-appender", + "tracing-subscriber", + "uuid", + "wasmtime", + "wasmtime-wasi", +] + +[[package]] +name = "function-stream-cli" +version = "0.1.0" +dependencies = [ + "arrow-array", + "arrow-ipc", + "arrow-schema", + "clap", + "comfy-table", + "function-stream", + "protocol", + "rustyline", + "rustyline-derive", + "thiserror 2.0.17", + "tokio", + "tonic", +] + +[[package]] +name = "futures" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-channel" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.31" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" + +[[package]] +name = "futures-io" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" + +[[package]] +name = "futures-sink" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" + +[[package]] +name = "futures-task" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" + +[[package]] +name = "futures-util" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" +dependencies = [ + "futures-core", + "futures-sink", + "futures-task", + "pin-project-lite", + "pin-utils", +] + +[[package]] +name = "fxhash" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c" +dependencies = [ + "byteorder", +] + +[[package]] +name = "fxprof-processed-profile" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "27d12c0aed7f1e24276a241aadc4cb8ea9f83000f34bc062b7cc2d51e3b0fabd" +dependencies = [ + "bitflags 2.10.0", + "debugid", + "fxhash", + "serde", + "serde_json", +] + +[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", +] + +[[package]] +name = "getrandom" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + +[[package]] +name = "getrandom" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" +dependencies = [ + "cfg-if", + "libc", + "r-efi", + "wasip2", +] + +[[package]] +name = "gimli" +version = "0.31.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" +dependencies = [ + "fallible-iterator", + "indexmap 2.12.1", + "stable_deref_trait", +] + +[[package]] +name = "glob" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280" + +[[package]] +name = "h2" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3c0b69cfcb4e1b9f1bf2f53f95f766e4661169728ec61cd3fe5a0166f2d1386" +dependencies = [ + "atomic-waker", + "bytes", + "fnv", + "futures-core", + "futures-sink", + "http", + "indexmap 2.12.1", + "slab", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "half" +version = "2.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ea2d84b969582b4b1864a92dc5d27cd2b77b622a8d79306834f1be5ba20d84b" +dependencies = [ + "cfg-if", + "crunchy", + "num-traits", + "zerocopy", +] + +[[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" + +[[package]] +name = "hashbrown" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" +dependencies = [ + "ahash", +] + +[[package]] +name = "hashbrown" +version = "0.15.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" +dependencies = [ + "allocator-api2", + "equivalent", + "foldhash", + "serde", +] + +[[package]] +name = "hashbrown" +version = "0.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" + +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + +[[package]] +name = "hermit-abi" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c" + +[[package]] +name = "home" +version = "0.5.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc627f471c528ff0c4a49e1d5e60450c8f6461dd6d10ba9dcd3a61d3dff7728d" +dependencies = [ + "windows-sys 0.61.2", +] + +[[package]] +name = "http" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3ba2a386d7f85a81f119ad7498ebe444d2e22c2af0b86b069416ace48b3311a" +dependencies = [ + "bytes", + "itoa", +] + +[[package]] +name = "http-body" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" +dependencies = [ + "bytes", + "http", +] + +[[package]] +name = "http-body-util" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" +dependencies = [ + "bytes", + "futures-core", + "http", + "http-body", + "pin-project-lite", +] + +[[package]] +name = "httparse" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" + +[[package]] +name = "httpdate" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" + +[[package]] +name = "humantime" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "135b12329e5e3ce057a9f972339ea52bc954fe1e9358ef27f95e89716fbc5424" + +[[package]] +name = "hyper" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ab2d4f250c3d7b1c9fcdff1cece94ea4e2dfbec68614f7b87cb205f24ca9d11" +dependencies = [ + "atomic-waker", + "bytes", + "futures-channel", + "futures-core", + "h2", + "http", + "http-body", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "pin-utils", + "smallvec", + "tokio", + "want", +] + +[[package]] +name = "hyper-timeout" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" +dependencies = [ + "hyper", + "hyper-util", + "pin-project-lite", + "tokio", + "tower-service", +] + +[[package]] +name = "hyper-util" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "727805d60e7938b76b826a6ef209eb70eaa1812794f9424d4a4e2d740662df5f" +dependencies = [ + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "http", + "http-body", + "hyper", + "libc", + "pin-project-lite", + "socket2 0.6.1", + "tokio", + "tower-service", + "tracing", +] + +[[package]] +name = "iana-time-zone" +version = "0.1.64" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33e57f83510bb73707521ebaffa789ec8caf86f9657cad665b092b581d40e9fb" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "log", + "wasm-bindgen", + "windows-core", +] + +[[package]] +name 
= "iana-time-zone-haiku" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" +dependencies = [ + "cc", +] + +[[package]] +name = "icu_collections" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c6b649701667bbe825c3b7e6388cb521c23d88644678e83c0c4d0a621a34b43" +dependencies = [ + "displaydoc", + "potential_utf", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locale_core" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edba7861004dd3714265b4db54a3c390e880ab658fec5f7db895fae2046b5bb6" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_normalizer" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f6c8828b67bf8908d82127b2054ea1b4427ff0230ee9141c54251934ab1b599" +dependencies = [ + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7aedcccd01fc5fe81e6b489c15b247b8b0690feb23304303a9e560f37efc560a" + +[[package]] +name = "icu_properties" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "020bfc02fe870ec3a66d93e677ccca0562506e5872c650f893269e08615d74ec" +dependencies = [ + "icu_collections", + "icu_locale_core", + "icu_properties_data", + "icu_provider", + "zerotrie", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "616c294cf8d725c6afcd8f55abc17c56464ef6211f9ed59cccffe534129c77af" + +[[package]] +name = "icu_provider" +version = "2.1.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "85962cf0ce02e1e0a629cc34e7ca3e373ce20dda4c4d7294bbd0bf1fdb59e614" +dependencies = [ + "displaydoc", + "icu_locale_core", + "writeable", + "yoke", + "zerofrom", + "zerotrie", + "zerovec", +] + +[[package]] +name = "id-arena" +version = "2.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25a2bc672d1148e28034f176e01fffebb08b35768468cc954630da77a1449005" + +[[package]] +name = "idna" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b0875f23caa03898994f6ddc501886a45c7d3d62d04d2d90788d47be1b1e4de" +dependencies = [ + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" +dependencies = [ + "icu_normalizer", + "icu_properties", +] + +[[package]] +name = "indexmap" +version = "1.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" +dependencies = [ + "autocfg", + "hashbrown 0.12.3", +] + +[[package]] +name = "indexmap" +version = "2.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ad4bb2b565bca0645f4d68c5c9af97fba094e9791da685bf83cb5f3ce74acf2" +dependencies = [ + "equivalent", + "hashbrown 0.16.1", + "serde", + "serde_core", +] + +[[package]] +name = "io-extras" +version = "0.18.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2285ddfe3054097ef4b2fe909ef8c3bcd1ea52a8f0d274416caebeef39f04a65" +dependencies = [ + "io-lifetimes", + "windows-sys 0.59.0", +] + +[[package]] +name = "io-lifetimes" +version = "2.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06432fb54d3be7964ecd3649233cddf80db2832f47fec34c01f65b3d9d774983" + 
+[[package]] +name = "ipnet" +version = "2.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" + +[[package]] +name = "is-terminal" +version = "0.4.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3640c1c38b8e4e43584d8df18be5fc6b0aa314ce6ebf51b53313d4306cca8e46" +dependencies = [ + "hermit-abi", + "libc", + "windows-sys 0.61.2", +] + +[[package]] +name = "is_terminal_polyfill" +version = "1.70.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6cb138bb79a146c1bd460005623e142ef0181e3d0219cb493e02f7d08a35695" + +[[package]] +name = "itertools" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" +dependencies = [ + "either", +] + +[[package]] +name = "itertools" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +dependencies = [ + "either", +] + +[[package]] +name = "itertools" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2" + +[[package]] +name = "ittapi" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b996fe614c41395cdaedf3cf408a9534851090959d90d54a535f675550b64b1" +dependencies = [ + "anyhow", + "ittapi-sys", + "log", +] + +[[package]] +name = "ittapi-sys" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"52f5385394064fa2c886205dba02598013ce83d3e92d33dbdc0c52fe0e7bf4fc" +dependencies = [ + "cc", +] + +[[package]] +name = "jobserver" +version = "0.1.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9afb3de4395d6b3e67a780b6de64b51c978ecf11cb9a462c66be7d4ca9039d33" +dependencies = [ + "getrandom 0.3.4", + "libc", +] + +[[package]] +name = "js-sys" +version = "0.3.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "464a3709c7f55f1f721e5389aa6ea4e3bc6aba669353300af094b29ffbdde1d8" +dependencies = [ + "once_cell", + "wasm-bindgen", +] + +[[package]] +name = "lazy_static" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" + +[[package]] +name = "lazycell" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" + +[[package]] +name = "leb128" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67" + +[[package]] +name = "leb128fmt" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2" + +[[package]] +name = "lexical-core" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2cde5de06e8d4c2faabc400238f9ae1c74d5412d03a7bd067645ccbc47070e46" +dependencies = [ + "lexical-parse-float", + "lexical-parse-integer", + "lexical-util", + "lexical-write-float", + "lexical-write-integer", +] + +[[package]] +name = "lexical-parse-float" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "683b3a5ebd0130b8fb52ba0bdc718cc56815b6a097e28ae5a6997d0ad17dc05f" +dependencies = [ + "lexical-parse-integer", + 
"lexical-util", + "static_assertions", +] + +[[package]] +name = "lexical-parse-integer" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d0994485ed0c312f6d965766754ea177d07f9c00c9b82a5ee62ed5b47945ee9" +dependencies = [ + "lexical-util", + "static_assertions", +] + +[[package]] +name = "lexical-util" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5255b9ff16ff898710eb9eb63cb39248ea8a5bb036bea8085b1a767ff6c4e3fc" +dependencies = [ + "static_assertions", +] + +[[package]] +name = "lexical-write-float" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accabaa1c4581f05a3923d1b4cfd124c329352288b7b9da09e766b0668116862" +dependencies = [ + "lexical-util", + "lexical-write-integer", + "static_assertions", +] + +[[package]] +name = "lexical-write-integer" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1b6f3d1f4422866b68192d62f77bc5c700bee84f3069f2469d7bc8c77852446" +dependencies = [ + "lexical-util", + "static_assertions", +] + +[[package]] +name = "libc" +version = "0.2.179" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c5a2d376baa530d1238d133232d15e239abad80d05838b4b59354e5268af431f" + +[[package]] +name = "libloading" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7c4b02199fee7c5d21a5ae7d8cfa79a6ef5bb2fc834d6e9058e89c825efdc55" +dependencies = [ + "cfg-if", + "windows-link", +] + +[[package]] +name = "libm" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" + +[[package]] +name = "libredox" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d0b95e02c851351f877147b7deea7b1afb1df71b63aa5f8270716e0c5720616" +dependencies = [ + 
"bitflags 2.10.0", + "libc", +] + +[[package]] +name = "librocksdb-sys" +version = "0.11.0+8.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3386f101bcb4bd252d8e9d2fb41ec3b0862a15a62b478c355b2982efa469e3e" +dependencies = [ + "bindgen 0.65.1", + "bzip2-sys", + "cc", + "glob", + "libc", + "libz-sys", + "lz4-sys", + "zstd-sys", +] + +[[package]] +name = "libz-sys" +version = "1.1.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15d118bbf3771060e7311cc7bb0545b01d08a8b4a7de949198dec1fa0ca1c0f7" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "linux-raw-sys" +version = "0.4.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" + +[[package]] +name = "linux-raw-sys" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" + +[[package]] +name = "litemap" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" + +[[package]] +name = "litrs" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11d3d7f243d5c5a8b9bb5d6dd2b1602c0cb0b9db1621bafc7ed66e35ff9fe092" + +[[package]] +name = "lock_api" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "224399e74b87b5f3557511d98dff8b14089b3dadafcab6bb93eab67d3aace965" +dependencies = [ + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" + +[[package]] +name = "lru" +version = "0.12.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" +dependencies = [ + "hashbrown 0.15.5", +] + +[[package]] +name = "lz4-sys" +version = "1.11.1+lz4-1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6bd8c0d6c6ed0cd30b3652886bb8711dc4bb01d637a68105a3d5158039b418e6" +dependencies = [ + "cc", + "libc", +] + +[[package]] +name = "mach2" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d640282b302c0bb0a2a8e0233ead9035e3bed871f0b7e81fe4a1ec829765db44" +dependencies = [ + "libc", +] + +[[package]] +name = "matchers" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1525a2a28c7f4fa0fc98bb91ae755d1e2d1505079e05539e35bc876b5d65ae9" +dependencies = [ + "regex-automata", +] + +[[package]] +name = "matchit" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" + +[[package]] +name = "maybe-owned" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4facc753ae494aeb6e3c22f839b158aebd4f9270f55cd3c79906c45476c47ab4" + +[[package]] +name = "memchr" +version = "2.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" + +[[package]] +name = "memfd" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad38eb12aea514a0466ea40a80fd8cc83637065948eb4a426e4aa46261175227" +dependencies = [ + "rustix 1.1.3", +] + +[[package]] +name = "mime" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" + +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + +[[package]] +name = "mio" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a69bcab0ad47271a0234d9422b131806bf3968021e5dc9328caf2d4cd58557fc" +dependencies = [ + "libc", + "wasi", + "windows-sys 0.61.2", +] + +[[package]] +name = "multimap" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d87ecb2933e8aeadb3e3a02b828fed80a7528047e68b4f424523a0981a3a084" + +[[package]] +name = "nibble_vec" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77a5d83df9f36fe23f0c3648c6bbb8b0298bb5f1939c8f2704431371f4b84d43" +dependencies = [ + "smallvec", +] + +[[package]] +name = "nix" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4" +dependencies = [ + "bitflags 2.10.0", + "cfg-if", + "cfg_aliases", + "libc", +] + +[[package]] +name = "nom" +version = "7.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" +dependencies = [ + "memchr", + "minimal-lexical", +] + +[[package]] +name = "nu-ansi-term" +version = "0.50.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" +dependencies = [ + "windows-sys 0.61.2", +] + +[[package]] +name = "num" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35bd024e8b2ff75562e5f34e7f4905839deb4b22955ef5e73d2fea1b9813cb23" +dependencies = [ + "num-bigint", + "num-complex", + "num-integer", + "num-iter", + "num-rational", + "num-traits", +] + +[[package]] +name = "num-bigint" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" +dependencies = [ + "num-integer", + "num-traits", +] + +[[package]] +name = "num-complex" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-conv" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + +[[package]] +name = "num-integer" +version = "0.1.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-iter" +version = "0.1.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-rational" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f83d14da390562dca69fc84082e73e548e1ad308d24accdedd2720017cb37824" +dependencies = [ + "num-bigint", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", + "libm", +] + +[[package]] +name = "num_cpus" +version = "1.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91df4bbde75afed763b708b7eee1e8e7651e02d97f6d5dd763e89367e957b23b" +dependencies = [ + "hermit-abi", + "libc", +] + +[[package]] +name = "num_enum" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b1207a7e20ad57b847bbddc6776b968420d38292bbfe2089accff5e19e82454c" +dependencies = [ + "num_enum_derive", + "rustversion", +] + +[[package]] +name = "num_enum_derive" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff32365de1b6743cb203b710788263c44a03de03802daf96092f2da4fe6ba4d7" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn 2.0.113", +] + +[[package]] +name = "object" +version = "0.32.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" +dependencies = [ + "memchr", +] + +[[package]] +name = "object" +version = "0.36.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" +dependencies = [ + "crc32fast", + "hashbrown 0.15.5", + "indexmap 2.12.1", + "memchr", +] + +[[package]] +name = "once_cell" +version = "1.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" + +[[package]] +name = "once_cell_polyfill" +version = "1.70.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "384b8ab6d37215f3c5301a95a4accb5d64aa607f1fcb26a11b5303878451b4fe" + +[[package]] +name = "openssl-sys" +version = "0.9.111" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82cab2d520aa75e3c58898289429321eb788c3106963d0dc886ec7a5f4adc321" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "os_pipe" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d8fae84b431384b68627d0f9b3b1245fcf9f46f6c0e3dc902e9dce64edd1967" +dependencies = [ + "libc", + "windows-sys 0.61.2", +] + +[[package]] +name = "parking_lot" +version = "0.12.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum 
= "93857453250e3077bd71ff98b6a65ea6621a19bb0f559a85248955ac12c45a1a" +dependencies = [ + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2621685985a2ebf1c516881c026032ac7deafcda1a2c9b7850dc81e3dfcb64c1" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "smallvec", + "windows-link", +] + +[[package]] +name = "paste" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" + +[[package]] +name = "peeking_take_while" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" + +[[package]] +name = "percent-encoding" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" + +[[package]] +name = "pest" +version = "2.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c9eb05c21a464ea704b53158d358a31e6425db2f63a1a7312268b05fe2b75f7" +dependencies = [ + "memchr", + "ucd-trie", +] + +[[package]] +name = "pest_derive" +version = "2.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68f9dbced329c441fa79d80472764b1a2c7e57123553b8519b36663a2fb234ed" +dependencies = [ + "pest", + "pest_generator", +] + +[[package]] +name = "pest_generator" +version = "2.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3bb96d5051a78f44f43c8f712d8e810adb0ebf923fc9ed2655a7f66f63ba8ee5" +dependencies = [ + "pest", + "pest_meta", + "proc-macro2", + "quote", + "syn 2.0.113", +] + +[[package]] +name = "pest_meta" +version = "2.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"602113b5b5e8621770cfd490cfd90b9f84ab29bd2b0e49ad83eb6d186cef2365" +dependencies = [ + "pest", + "sha2", +] + +[[package]] +name = "petgraph" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3672b37090dbd86368a4145bc067582552b29c27377cad4e0a306c97f9bd7772" +dependencies = [ + "fixedbitset", + "indexmap 2.12.1", +] + +[[package]] +name = "pin-project" +version = "1.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677f1add503faace112b9f1373e43e9e054bfdd22ff1a63c1bc485eaec6a6a8a" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.113", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "pkg-config" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" + +[[package]] +name = "postcard" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6764c3b5dd454e283a30e6dfe78e9b31096d9e32036b5d1eaac7a6119ccb9a24" +dependencies = [ + "cobs", + "embedded-io 0.4.0", + "embedded-io 0.6.1", + "serde", +] + +[[package]] +name = "potential_utf" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b73949432f5e2a09657003c25bca5e19a0e9c84f8058ca374f49e0ebe605af77" 
+dependencies = [ + "zerovec", +] + +[[package]] +name = "powerfmt" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + +[[package]] +name = "ppv-lite86" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" +dependencies = [ + "zerocopy", +] + +[[package]] +name = "prettyplease" +version = "0.2.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" +dependencies = [ + "proc-macro2", + "syn 2.0.113", +] + +[[package]] +name = "proc-macro-crate" +version = "3.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983" +dependencies = [ + "toml_edit 0.23.10+spec-1.0.0", +] + +[[package]] +name = "proc-macro2" +version = "1.0.104" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9695f8df41bb4f3d222c95a67532365f569318332d03d5f3f67f37b20e6ebdf0" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "proctitle" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "924cd8a0de90723d63fed19c5035ea129913a0bc998b37686a67f1eaf6a2aab5" +dependencies = [ + "lazy_static", + "libc", + "winapi", +] + +[[package]] +name = "prost" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2796faa41db3ec313a31f7624d9286acf277b52de526150b7e69f3debf891ee5" +dependencies = [ + "bytes", + "prost-derive", +] + +[[package]] +name = "prost-build" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be769465445e8c1474e9c5dac2018218498557af32d9ed057325ec9a41ae81bf" +dependencies = [ + "heck", + "itertools 0.14.0", + "log", + 
"multimap", + "once_cell", + "petgraph", + "prettyplease", + "prost", + "prost-types", + "regex", + "syn 2.0.113", + "tempfile", +] + +[[package]] +name = "prost-derive" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" +dependencies = [ + "anyhow", + "itertools 0.14.0", + "proc-macro2", + "quote", + "syn 2.0.113", +] + +[[package]] +name = "prost-types" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52c2c1bf36ddb1a1c396b3601a3cec27c2462e45f07c386894ec3ccf5332bd16" +dependencies = [ + "prost", +] + +[[package]] +name = "protocol" +version = "0.1.0" +dependencies = [ + "env_logger", + "log", + "prost", + "tonic", + "tonic-build", +] + +[[package]] +name = "psm" +version = "0.1.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d11f2fedc3b7dafdc2851bc52f277377c5473d378859be234bc7ebb593144d01" +dependencies = [ + "ar_archive_writer", + "cc", +] + +[[package]] +name = "pulley-interpreter" +version = "28.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8324e531de91a3c25021a30fb7862d39cc516b61fbb801176acb5ff279ea887b" +dependencies = [ + "cranelift-bitset", + "log", + "sptr", +] + +[[package]] +name = "quote" +version = "1.0.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a338cc41d27e6cc6dce6cefc13a0729dfbb81c262b1f519331575dd80ef3067f" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "r-efi" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" + +[[package]] +name = "radix_trie" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c069c179fcdc6a2fe24d8d18305cf085fdbd4f922c041943e203685d6a1c58fd" +dependencies = [ + "endian-type", + 
"nibble_vec", +] + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha", + "rand_core", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom 0.2.16", +] + +[[package]] +name = "rayon" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "368f01d005bf8fd9b1206fb6fa653e6c4a81ceb1466406b81792d87c5677a58f" +dependencies = [ + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22e18b0f0062d30d4230b2e85ff77fdfe4326feb054b9783a3460d8435c8ab91" +dependencies = [ + "crossbeam-deque", + "crossbeam-utils", +] + +[[package]] +name = "rdkafka" +version = "0.38.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f1856d72dbbbea0d2a5b2eaf6af7fb3847ef2746e883b11781446a51dbc85c0" +dependencies = [ + "futures-channel", + "futures-util", + "libc", + "log", + "rdkafka-sys", + "serde", + "serde_derive", + "serde_json", + "slab", + "tokio", +] + +[[package]] +name = "rdkafka-sys" +version = "4.9.0+2.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5230dca48bc354d718269f3e4353280e188b610f7af7e2fcf54b7a79d5802872" +dependencies = [ + "cmake", + "libc", + "libz-sys", + "num_enum", + "openssl-sys", + "pkg-config", + "sasl2-sys", +] + +[[package]] +name = 
"redox_syscall" +version = "0.5.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" +dependencies = [ + "bitflags 2.10.0", +] + +[[package]] +name = "redox_users" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" +dependencies = [ + "getrandom 0.2.16", + "libredox", + "thiserror 1.0.69", +] + +[[package]] +name = "regalloc2" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc06e6b318142614e4a48bc725abbf08ff166694835c43c9dae5a9009704639a" +dependencies = [ + "allocator-api2", + "bumpalo", + "hashbrown 0.15.5", + "log", + "rustc-hash 2.1.1", + "serde", + "smallvec", +] + +[[package]] +name = "regex" +version = "1.12.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "843bc0191f75f3e22651ae5f1e72939ab2f72a4bc30fa80a066bd66edefc24d4" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata", + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5276caf25ac86c8d810222b3dbb938e512c55c6831a10f3e6ed1c93b84041f1c" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.8.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58" + +[[package]] +name = "rocksdb" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb6f170a4041d50a0ce04b0d2e14916d6ca863ea2e422689a5b694395d299ffe" +dependencies = [ + "libc", + "librocksdb-sys", +] + +[[package]] +name = "rustc-demangle" +version = "0.1.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace" + +[[package]] +name = "rustc-hash" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" + +[[package]] +name = "rustc-hash" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" + +[[package]] +name = "rustc_version" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" +dependencies = [ + "semver", +] + +[[package]] +name = "rustix" +version = "0.38.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" +dependencies = [ + "bitflags 2.10.0", + "errno", + "libc", + "linux-raw-sys 0.4.15", + "windows-sys 0.59.0", +] + +[[package]] +name = "rustix" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "146c9e247ccc180c1f61615433868c99f3de3ae256a30a43b49f67c2d9171f34" +dependencies = [ + "bitflags 2.10.0", + "errno", + "libc", + "linux-raw-sys 0.11.0", + "windows-sys 0.61.2", +] + +[[package]] +name = "rustix-linux-procfs" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2fc84bf7e9aa16c4f2c758f27412dc9841341e16aa682d9c7ac308fe3ee12056" +dependencies = [ + "once_cell", + "rustix 1.1.3", +] + +[[package]] +name = "rustversion" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" + +[[package]] +name = "rustyline" +version = "14.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7803e8936da37efd9b6d4478277f4b2b9bb5cdb37a113e8d63222e58da647e63" 
+dependencies = [ + "bitflags 2.10.0", + "cfg-if", + "clipboard-win", + "fd-lock", + "home", + "libc", + "log", + "memchr", + "nix", + "radix_trie", + "unicode-segmentation", + "unicode-width 0.1.14", + "utf8parse", + "windows-sys 0.52.0", +] + +[[package]] +name = "rustyline-derive" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8218eaf5d960e3c478a1b0f129fa888dd3d8d22eb3de097e9af14c1ab4438024" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ryu" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a50f4cf475b65d88e057964e0e9bb1f0aa9bbb2036dc65c64596b42932536984" + +[[package]] +name = "sasl2-sys" +version = "0.1.22+2.1.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f2a7f7efd9fc98b3a9033272df10709f5ee3fa0eabbd61a527a3a1ed6bd3c6" +dependencies = [ + "cc", + "duct", + "libc", + "pkg-config", +] + +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + +[[package]] +name = "semver" +version = "1.0.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2" +dependencies = [ + "serde", + "serde_core", +] + +[[package]] +name = "serde" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" +dependencies = [ + "serde_core", + "serde_derive", +] + +[[package]] +name = "serde_core" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.228" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.113", +] + +[[package]] +name = "serde_json" +version = "1.0.148" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3084b546a1dd6289475996f182a22aba973866ea8e8b02c51d9f46b1336a22da" +dependencies = [ + "itoa", + "memchr", + "serde", + "serde_core", + "zmij", +] + +[[package]] +name = "serde_spanned" +version = "0.6.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf41e0cfaf7226dca15e8197172c295a782857fcb97fad1808a166870dee75a3" +dependencies = [ + "serde", +] + +[[package]] +name = "serde_yaml" +version = "0.9.34+deprecated" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" +dependencies = [ + "indexmap 2.12.1", + "itoa", + "ryu", + "serde", + "unsafe-libyaml", +] + +[[package]] +name = "sha2" +version = "0.10.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + +[[package]] +name = "sharded-slab" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "shared_child" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e362d9935bc50f019969e2f9ecd66786612daae13e8f277be7bfb66e8bed3f7" +dependencies = [ + "libc", + "sigchld", + "windows-sys 0.60.2", +] + +[[package]] +name = "shellexpand" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ccc8076840c4da029af4f87e4e8daeb0fca6b87bbb02e10cb60b791450e11e4" 
+dependencies = [ + "dirs", +] + +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "sigchld" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47106eded3c154e70176fc83df9737335c94ce22f821c32d17ed1db1f83badb1" +dependencies = [ + "libc", + "os_pipe", + "signal-hook", +] + +[[package]] +name = "signal-hook" +version = "0.3.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d881a16cf4426aa584979d30bd82cb33429027e42122b169753d6ef1085ed6e2" +dependencies = [ + "libc", + "signal-hook-registry", +] + +[[package]] +name = "signal-hook-registry" +version = "1.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4db69cba1110affc0e9f7bcd48bbf87b3f4fc7c61fc9155afd4c469eb3d6c1b" +dependencies = [ + "errno", + "libc", +] + +[[package]] +name = "slab" +version = "0.4.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" + +[[package]] +name = "smallvec" +version = "1.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" +dependencies = [ + "serde", +] + +[[package]] +name = "socket2" +version = "0.5.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "socket2" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17129e116933cf371d018bb80ae557e889637989d8638274fb25622827b03881" +dependencies = [ + "libc", + "windows-sys 0.60.2", +] + +[[package]] +name = "sptr" +version = "0.3.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b9b39299b249ad65f3b7e96443bad61c02ca5cd3589f46cb6d610a0fd6c0d6a" + +[[package]] +name = "stable_deref_trait" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596" + +[[package]] +name = "static_assertions" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" + +[[package]] +name = "strsim" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" + +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.113" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "678faa00651c9eb72dd2020cbdf275d92eccb2400d568e419efdd64838145cb4" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "sync_wrapper" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" + +[[package]] +name = "synstructure" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.113", +] + +[[package]] +name = "system-interface" +version = "0.27.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc4592f674ce18521c2a81483873a49596655b179f71c5e05d10c1fe66c78745" +dependencies = [ + "bitflags 2.10.0", + 
"cap-fs-ext", + "cap-std", + "fd-lock", + "io-lifetimes", + "rustix 0.38.44", + "windows-sys 0.59.0", + "winx", +] + +[[package]] +name = "target-lexicon" +version = "0.12.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61c41af27dd6d1e27b1b16b489db798443478cef1f06a660c96db617ba5de3b1" + +[[package]] +name = "tempfile" +version = "3.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "655da9c7eb6305c55742045d5a8d2037996d61d8de95806335c7c86ce0f82e9c" +dependencies = [ + "fastrand", + "getrandom 0.3.4", + "once_cell", + "rustix 1.1.3", + "windows-sys 0.61.2", +] + +[[package]] +name = "termcolor" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "thiserror" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +dependencies = [ + "thiserror-impl 1.0.69", +] + +[[package]] +name = "thiserror" +version = "2.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f63587ca0f12b72a0600bcba1d40081f830876000bb46dd2337a3051618f4fc8" +dependencies = [ + "thiserror-impl 2.0.17", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.113", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.113", +] + +[[package]] +name = "thread_local" +version = "1.1.9" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "time" +version = "0.3.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91e7d9e3bb61134e77bde20dd4825b97c010155709965fedf0f49bb138e52a9d" +dependencies = [ + "deranged", + "itoa", + "num-conv", + "powerfmt", + "serde", + "time-core", + "time-macros", +] + +[[package]] +name = "time-core" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "40868e7c1d2f0b8d73e4a8c7f0ff63af4f6d19be117e90bd73eb1d62cf831c6b" + +[[package]] +name = "time-macros" +version = "0.2.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30cfb0125f12d9c277f35663a0a33f8c30190f4e4574868a330595412d34ebf3" +dependencies = [ + "num-conv", + "time-core", +] + +[[package]] +name = "tiny-keccak" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" +dependencies = [ + "crunchy", +] + +[[package]] +name = "tinystr" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42d3e9c45c09de15d06dd8acf5f4e0e399e85927b7f00711024eb7ae10fa4869" +dependencies = [ + "displaydoc", + "zerovec", +] + +[[package]] +name = "tokio" +version = "1.49.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72a2903cd7736441aac9df9d7688bd0ce48edccaadf181c3b90be801e81d3d86" +dependencies = [ + "bytes", + "libc", + "mio", + "parking_lot", + "pin-project-lite", + "signal-hook-registry", + "socket2 0.6.1", + "tokio-macros", + "windows-sys 0.61.2", +] + +[[package]] +name = "tokio-macros" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" 
+dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.113", +] + +[[package]] +name = "tokio-stream" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32da49809aab5c3bc678af03902d4ccddea2a87d028d86392a4b1560c6906c70" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tokio-util" +version = "0.7.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ae9cec805b01e8fc3fd2fe289f89149a9b66dd16786abd8b19cfa7b48cb0098" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "toml" +version = "0.8.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime 0.6.11", + "toml_edit 0.22.27", +] + +[[package]] +name = "toml_datetime" +version = "0.6.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c" +dependencies = [ + "serde", +] + +[[package]] +name = "toml_datetime" +version = "0.7.5+spec-1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92e1cfed4a3038bc5a127e35a2d360f145e1f4b971b551a2ba5fd7aedf7e1347" +dependencies = [ + "serde_core", +] + +[[package]] +name = "toml_edit" +version = "0.22.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" +dependencies = [ + "indexmap 2.12.1", + "serde", + "serde_spanned", + "toml_datetime 0.6.11", + "toml_write", + "winnow", +] + +[[package]] +name = "toml_edit" +version = "0.23.10+spec-1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84c8b9f757e028cee9fa244aea147aab2a9ec09d5325a9b01e0a49730c2b5269" +dependencies = [ + "indexmap 
2.12.1", + "toml_datetime 0.7.5+spec-1.1.0", + "toml_parser", + "winnow", +] + +[[package]] +name = "toml_parser" +version = "1.0.6+spec-1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3198b4b0a8e11f09dd03e133c0280504d0801269e9afa46362ffde1cbeebf44" +dependencies = [ + "winnow", +] + +[[package]] +name = "toml_write" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" + +[[package]] +name = "tonic" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877c5b330756d856ffcc4553ab34a5684481ade925ecc54bcd1bf02b1d0d4d52" +dependencies = [ + "async-stream", + "async-trait", + "axum", + "base64 0.22.1", + "bytes", + "h2", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-timeout", + "hyper-util", + "percent-encoding", + "pin-project", + "prost", + "socket2 0.5.10", + "tokio", + "tokio-stream", + "tower 0.4.13", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tonic-build" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9557ce109ea773b399c9b9e5dca39294110b74f1f342cb347a80d1fce8c26a11" +dependencies = [ + "prettyplease", + "proc-macro2", + "prost-build", + "prost-types", + "quote", + "syn 2.0.113", +] + +[[package]] +name = "tower" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +dependencies = [ + "futures-core", + "futures-util", + "indexmap 1.9.3", + "pin-project", + "pin-project-lite", + "rand", + "slab", + "tokio", + "tokio-util", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-layer" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" + +[[package]] +name = "tower-service" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" + +[[package]] +name = "tracing" +version = "0.1.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "63e71662fa4b2a2c3a26f570f037eb95bb1f85397f3cd8076caed2f026a6d100" +dependencies = [ + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-appender" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "786d480bce6247ab75f005b14ae1624ad978d3029d9113f0a22fa1ac773faeaf" +dependencies = [ + "crossbeam-channel", + "thiserror 2.0.17", + "time", + "tracing-subscriber", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.113", +] + +[[package]] +name = "tracing-core" +version = "0.1.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db97caf9d906fbde555dd62fa95ddba9eecfd14cb388e4f491a66d74cd5fb79a" +dependencies = [ + "once_cell", + "valuable", +] + +[[package]] +name = "tracing-log" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +dependencies = [ + "log", + "once_cell", + "tracing-core", +] + +[[package]] 
+name = "tracing-serde" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "704b1aeb7be0d0a84fc9828cae51dab5970fee5088f83d1dd7ee6f6246fc6ff1" +dependencies = [ + "serde", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f30143827ddab0d256fd843b7a66d164e9f271cfa0dde49142c5ca0ca291f1e" +dependencies = [ + "chrono", + "matchers", + "nu-ansi-term", + "once_cell", + "regex-automata", + "serde", + "serde_json", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", + "tracing-serde", +] + +[[package]] +name = "try-lock" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" + +[[package]] +name = "typenum" +version = "1.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "562d481066bde0658276a35467c4af00bdc6ee726305698a55b86e61d7ad82bb" + +[[package]] +name = "ucd-trie" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971" + +[[package]] +name = "unicode-ident" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5" + +[[package]] +name = "unicode-segmentation" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" + +[[package]] +name = "unicode-width" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" + +[[package]] +name = "unicode-width" +version = "0.2.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4ac048d71ede7ee76d585517add45da530660ef4390e49b098733c6e897f254" + +[[package]] +name = "unicode-xid" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" + +[[package]] +name = "unsafe-libyaml" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861" + +[[package]] +name = "url" +version = "2.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08bc136a29a3d1758e07a9cca267be308aeebf5cfd5a10f3f67ab2097683ef5b" +dependencies = [ + "form_urlencoded", + "idna", + "percent-encoding", + "serde", +] + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + +[[package]] +name = "utf8parse" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" + +[[package]] +name = "uuid" +version = "1.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2e054861b4bd027cd373e18e8d8d8e6548085000e41290d95ce0c373a654b4a" +dependencies = [ + "getrandom 0.3.4", + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "valuable" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" + +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + +[[package]] +name = "version_check" +version = "0.9.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + +[[package]] +name = "want" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" +dependencies = [ + "try-lock", +] + +[[package]] +name = "wasi" +version = "0.11.1+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" + +[[package]] +name = "wasip2" +version = "1.0.1+wasi-0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7" +dependencies = [ + "wit-bindgen", +] + +[[package]] +name = "wasm-bindgen" +version = "0.2.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d759f433fa64a2d763d1340820e46e111a7a5ab75f993d1852d70b03dbb80fd" +dependencies = [ + "cfg-if", + "once_cell", + "rustversion", + "wasm-bindgen-macro", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48cb0d2638f8baedbc542ed444afc0644a29166f1595371af4fecf8ce1e7eeb3" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cefb59d5cd5f92d9dcf80e4683949f15ca4b511f4ac0a6e14d4e1ac60c6ecd40" +dependencies = [ + "bumpalo", + "proc-macro2", + "quote", + "syn 2.0.113", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbc538057e648b67f72a982e708d485b2efa771e1ac05fec311f9f63e5800db4" +dependencies = [ + "unicode-ident", +] + 
+[[package]] +name = "wasm-encoder" +version = "0.221.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc8444fe4920de80a4fe5ab564fff2ae58b6b73166b89751f8c6c93509da32e5" +dependencies = [ + "leb128", + "wasmparser 0.221.3", +] + +[[package]] +name = "wasm-encoder" +version = "0.243.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c55db9c896d70bd9fa535ce83cd4e1f2ec3726b0edd2142079f594fc3be1cb35" +dependencies = [ + "leb128fmt", + "wasmparser 0.243.0", +] + +[[package]] +name = "wasmparser" +version = "0.221.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d06bfa36ab3ac2be0dee563380147a5b81ba10dd8885d7fbbc9eb574be67d185" +dependencies = [ + "bitflags 2.10.0", + "hashbrown 0.15.5", + "indexmap 2.12.1", + "semver", + "serde", +] + +[[package]] +name = "wasmparser" +version = "0.243.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6d8db401b0528ec316dfbe579e6ab4152d61739cfe076706d2009127970159d" +dependencies = [ + "bitflags 2.10.0", + "indexmap 2.12.1", + "semver", +] + +[[package]] +name = "wasmprinter" +version = "0.221.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7343c42a97f2926c7819ff81b64012092ae954c5d83ddd30c9fcdefd97d0b283" +dependencies = [ + "anyhow", + "termcolor", + "wasmparser 0.221.3", +] + +[[package]] +name = "wasmtime" +version = "28.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edd30973c65eceb0f37dfcc430d83abd5eb24015fdfcab6912f52949287e04f0" +dependencies = [ + "addr2line", + "anyhow", + "async-trait", + "bitflags 2.10.0", + "bumpalo", + "cc", + "cfg-if", + "encoding_rs", + "fxprof-processed-profile", + "gimli", + "hashbrown 0.14.5", + "indexmap 2.12.1", + "ittapi", + "libc", + "libm", + "log", + "mach2", + "memfd", + "object 0.36.7", + "once_cell", + "paste", + "postcard", + "psm", + "pulley-interpreter", + "rayon", + "rustix 0.38.44", + "semver", 
+ "serde", + "serde_derive", + "serde_json", + "smallvec", + "sptr", + "target-lexicon", + "wasm-encoder 0.221.3", + "wasmparser 0.221.3", + "wasmtime-asm-macros", + "wasmtime-cache", + "wasmtime-component-macro", + "wasmtime-component-util", + "wasmtime-cranelift", + "wasmtime-environ", + "wasmtime-fiber", + "wasmtime-jit-debug", + "wasmtime-jit-icache-coherence", + "wasmtime-slab", + "wasmtime-versioned-export-macros", + "wasmtime-winch", + "wat", + "windows-sys 0.59.0", +] + +[[package]] +name = "wasmtime-asm-macros" +version = "28.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6c21dd30d1f3f93ee390ac1a7ec304ecdbfdab6390e1add41a1f52727b0992b" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "wasmtime-cache" +version = "28.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cabd563cfbfe75c5bf514081f624ca8d18391a37520d8c794abce702474e688c" +dependencies = [ + "anyhow", + "base64 0.21.7", + "directories-next", + "log", + "postcard", + "rustix 0.38.44", + "serde", + "serde_derive", + "sha2", + "toml", + "windows-sys 0.59.0", + "zstd", +] + +[[package]] +name = "wasmtime-component-macro" +version = "28.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f948a6ef3119d52c9f12936970de28ddf3f9bea04bc65571f4a92d2e5ab38f4" +dependencies = [ + "anyhow", + "proc-macro2", + "quote", + "syn 2.0.113", + "wasmtime-component-util", + "wasmtime-wit-bindgen", + "wit-parser", +] + +[[package]] +name = "wasmtime-component-util" +version = "28.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9275aa01ceaaa2fa6c0ecaa5267518d80b9d6e9ae7c7ea42f4c6e073e6a69ef" + +[[package]] +name = "wasmtime-cranelift" +version = "28.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0701a44a323267aae4499672dae422b266cee3135a23b640972ec8c0e10a44a2" +dependencies = [ + "anyhow", + "cfg-if", + "cranelift-codegen", + 
"cranelift-control", + "cranelift-entity", + "cranelift-frontend", + "cranelift-native", + "gimli", + "itertools 0.12.1", + "log", + "object 0.36.7", + "smallvec", + "target-lexicon", + "thiserror 1.0.69", + "wasmparser 0.221.3", + "wasmtime-environ", + "wasmtime-versioned-export-macros", +] + +[[package]] +name = "wasmtime-environ" +version = "28.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "264c968c1b81d340355ece2be0bc31a10f567ccb6ce08512c3b7d10e26f3cbe5" +dependencies = [ + "anyhow", + "cpp_demangle", + "cranelift-bitset", + "cranelift-entity", + "gimli", + "indexmap 2.12.1", + "log", + "object 0.36.7", + "postcard", + "rustc-demangle", + "semver", + "serde", + "serde_derive", + "smallvec", + "target-lexicon", + "wasm-encoder 0.221.3", + "wasmparser 0.221.3", + "wasmprinter", + "wasmtime-component-util", +] + +[[package]] +name = "wasmtime-fiber" +version = "28.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78505221fd5bd7b07b4e1fa2804edea49dc231e626ad6861adc8f531812973e6" +dependencies = [ + "anyhow", + "cc", + "cfg-if", + "rustix 0.38.44", + "wasmtime-asm-macros", + "wasmtime-versioned-export-macros", + "windows-sys 0.59.0", +] + +[[package]] +name = "wasmtime-jit-debug" +version = "28.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0cec0a8e5620ae71bfcaaec78e3076be5b6ebf869f4e6191925d73242224a915" +dependencies = [ + "object 0.36.7", + "rustix 0.38.44", + "wasmtime-versioned-export-macros", +] + +[[package]] +name = "wasmtime-jit-icache-coherence" +version = "28.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9bedb677ca1b549d98f95e9e1f9251b460090d99a2c196a0614228c064bf2e59" +dependencies = [ + "anyhow", + "cfg-if", + "libc", + "windows-sys 0.59.0", +] + +[[package]] +name = "wasmtime-slab" +version = "28.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"564905638c132c275d365c1fa074f0b499790568f43148d29de84ccecfb5cb31" + +[[package]] +name = "wasmtime-versioned-export-macros" +version = "28.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e91092e6cf77390eeccee273846a9327f3e8f91c3c6280f60f37809f0e62d29" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.113", +] + +[[package]] +name = "wasmtime-wasi" +version = "28.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a8e04b9a4c68ad018b330a4f4914b82b01dc3582d715ce21a93564c7f26b19f" +dependencies = [ + "anyhow", + "async-trait", + "bitflags 2.10.0", + "bytes", + "cap-fs-ext", + "cap-net-ext", + "cap-rand", + "cap-std", + "cap-time-ext", + "fs-set-times", + "futures", + "io-extras", + "io-lifetimes", + "rustix 0.38.44", + "system-interface", + "thiserror 1.0.69", + "tokio", + "tracing", + "url", + "wasmtime", + "wiggle", + "windows-sys 0.59.0", +] + +[[package]] +name = "wasmtime-winch" +version = "28.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b111d909dc604c741bd8ac2f4af373eaa5c68c34b5717271bcb687688212cef8" +dependencies = [ + "anyhow", + "cranelift-codegen", + "gimli", + "object 0.36.7", + "target-lexicon", + "wasmparser 0.221.3", + "wasmtime-cranelift", + "wasmtime-environ", + "winch-codegen", +] + +[[package]] +name = "wasmtime-wit-bindgen" +version = "28.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f38f7a5eb2f06f53fe943e7fb8bf4197f7cf279f1bc52c0ce56e9d3ffd750a4" +dependencies = [ + "anyhow", + "heck", + "indexmap 2.12.1", + "wit-parser", +] + +[[package]] +name = "wast" +version = "35.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ef140f1b49946586078353a453a1d28ba90adfc54dde75710bc1931de204d68" +dependencies = [ + "leb128", +] + +[[package]] +name = "wast" +version = "243.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"df21d01c2d91e46cb7a221d79e58a2d210ea02020d57c092e79255cc2999ca7f" +dependencies = [ + "bumpalo", + "leb128fmt", + "memchr", + "unicode-width 0.2.2", + "wasm-encoder 0.243.0", +] + +[[package]] +name = "wat" +version = "1.243.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "226a9a91cd80a50449312fef0c75c23478fcecfcc4092bdebe1dc8e760ef521b" +dependencies = [ + "wast 243.0.0", +] + +[[package]] +name = "wiggle" +version = "28.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b23e3dc273d1e35cab9f38a5f76487aeeedcfa6a3fb594e209ee7b6f8b41dcc" +dependencies = [ + "anyhow", + "async-trait", + "bitflags 2.10.0", + "thiserror 1.0.69", + "tracing", + "wasmtime", + "wiggle-macro", +] + +[[package]] +name = "wiggle-generate" +version = "28.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8738c5a7ef3a9de0fae10f8b84091a2aa4e059d8fef23de202ab689812b6bc6e" +dependencies = [ + "anyhow", + "heck", + "proc-macro2", + "quote", + "shellexpand", + "syn 2.0.113", + "witx", +] + +[[package]] +name = "wiggle-macro" +version = "28.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e882267ac583e013a38a5aaeb83a49b219456ba3aa6e6772440f7213b176e8ff" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.113", + "wiggle-generate", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" +dependencies = [ + "windows-sys 0.61.2", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "winch-codegen" +version = "28.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6232f40a795be2ce10fc761ed3b403825126a60d12491ac556ea104a932fd18a" +dependencies = [ + "anyhow", + "cranelift-codegen", + "gimli", + "regalloc2", + "smallvec", + "target-lexicon", + "wasmparser 0.221.3", + "wasmtime-cranelift", + "wasmtime-environ", +] + +[[package]] +name = "windows-core" +version = "0.62.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb" +dependencies = [ + "windows-implement", + "windows-interface", + "windows-link", + "windows-result", + "windows-strings", +] + +[[package]] +name = "windows-implement" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.113", +] + +[[package]] +name = "windows-interface" +version = "0.59.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.113", +] + +[[package]] +name = "windows-link" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" + +[[package]] +name = "windows-result" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-strings" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.5", +] + +[[package]] +name = "windows-sys" +version = "0.61.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm 0.52.6", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", +] + +[[package]] +name = "windows-targets" +version = "0.53.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3" +dependencies = [ + "windows-link", + "windows_aarch64_gnullvm 0.53.1", + "windows_aarch64_msvc 0.53.1", + "windows_i686_gnu 0.53.1", + "windows_i686_gnullvm 0.53.1", + "windows_i686_msvc 0.53.1", + "windows_x86_64_gnu 0.53.1", + "windows_x86_64_gnullvm 0.53.1", + "windows_x86_64_msvc 0.53.1", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnu" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "960e6da069d81e09becb0ca57a65220ddff016ff2d6af6a223cf372a506593a3" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_i686_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" + +[[package]] +name = "winnow" +version = "0.7.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a5364e9d77fcdeeaa6062ced926ee3381faa2ee02d3eb83a5c27a8825540829" +dependencies = [ + "memchr", +] + +[[package]] +name = 
"winx" +version = "0.36.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f3fd376f71958b862e7afb20cfe5a22830e1963462f3a17f49d82a6c1d1f42d" +dependencies = [ + "bitflags 2.10.0", + "windows-sys 0.59.0", +] + +[[package]] +name = "wit-bindgen" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" + +[[package]] +name = "wit-parser" +version = "0.221.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "896112579ed56b4a538b07a3d16e562d101ff6265c46b515ce0c701eef16b2ac" +dependencies = [ + "anyhow", + "id-arena", + "indexmap 2.12.1", + "log", + "semver", + "serde", + "serde_derive", + "serde_json", + "unicode-xid", + "wasmparser 0.221.3", +] + +[[package]] +name = "witx" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e366f27a5cabcddb2706a78296a40b8fcc451e1a6aba2fc1d94b4a01bdaaef4b" +dependencies = [ + "anyhow", + "log", + "thiserror 1.0.69", + "wast 35.0.2", +] + +[[package]] +name = "writeable" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9" + +[[package]] +name = "yoke" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72d6e5c6afb84d73944e5cedb052c4680d5657337201555f9f2a16b7406d4954" +dependencies = [ + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.113", + "synstructure", +] + +[[package]] +name = "zerocopy" +version = "0.8.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"fd74ec98b9250adb3ca554bdde269adf631549f51d8a8f8f0a10b50f1cb298c3" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.8.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8a8d209fdf45cf5138cbb5a506f6b52522a25afccc534d1475dad8e31105c6a" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.113", +] + +[[package]] +name = "zerofrom" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.113", + "synstructure", +] + +[[package]] +name = "zerotrie" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a59c17a5562d507e4b54960e8569ebee33bee890c70aa3fe7b97e85a9fd7851" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", +] + +[[package]] +name = "zerovec" +version = "0.11.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c28719294829477f525be0186d13efa9a3c602f7ec202ca9e353d310fb9a002" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.113", +] + +[[package]] +name = "zmij" +version = "1.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30e0d8dffbae3d840f64bda38e28391faef673a7b5a6017840f2a106c8145868" + +[[package]] +name = "zstd" +version = "0.13.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "e91ee311a569c327171651566e07972200e76fcfe2242a4fa446149a3881c08a" +dependencies = [ + "zstd-safe", +] + +[[package]] +name = "zstd-safe" +version = "7.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f49c4d5f0abb602a93fb8736af2a4f4dd9512e36f7f570d66e65ff867ed3b9d" +dependencies = [ + "zstd-sys", +] + +[[package]] +name = "zstd-sys" +version = "2.0.16+zstd.1.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91e19ebc2adc8f83e43039e79776e3fda8ca919132d68a1fed6a5faca2683748" +dependencies = [ + "bindgen 0.72.1", + "cc", + "pkg-config", +] diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 00000000..6bb23a35 --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,59 @@ +[workspace] +members = [ + ".", + "protocol", + "cli/cli", +] + +[package] +name = "function-stream" +version = "0.1.0" +edition = "2024" + +[lib] +name = "function_stream" +path = "src/lib.rs" + +[[bin]] +name = "function-stream" +path = "src/main.rs" + + +[dependencies] +tokio = { version = "1.0", features = ["full"] } +serde = { version = "1.0", features = ["derive"] } +serde_yaml = "0.9" +serde_json = "1.0" +uuid = { version = "1.0", features = ["v4"] } +log = "0.4" +tracing = "0.1" +tracing-subscriber = { version = "0.3", features = ["env-filter", "json", "chrono"] } +tracing-appender = "0.2" +anyhow = "1.0" +thiserror = "2" +tonic = { version = "0.12", features = ["default"] } +async-trait = "0.1" +num_cpus = "1.0" +protocol = { path = "./protocol" } +rdkafka = { version = "0.38", features = ["cmake-build", "ssl", "gssapi"] } +crossbeam-channel = "0.5" +pest = "2.7" +pest_derive = "2.7" +clap = { version = "4.5", features = ["derive"] } +wasmtime = { version = "28.0", features = ["component-model", "async"] } +base64 = "0.22" +wasmtime-wasi = "28.0" +rocksdb = { version = "0.21", features = ["multi-threaded-cf", "lz4"] } +bincode = "1.3" +tokio-stream = "0.1.18" 
+lru = "0.12" +parking_lot = "0.12" +arrow-array = "52" +arrow-ipc = "52" +arrow-schema = "52" +proctitle = "0.1" + +[features] +default = ["incremental-cache", "python"] +incremental-cache = ["wasmtime/incremental-cache"] +python = [] diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 00000000..95fb28bc --- /dev/null +++ b/Dockerfile @@ -0,0 +1,58 @@ +FROM rust:1-bookworm AS builder + +RUN apt-get update && apt-get install -y --no-install-recommends \ + cmake=3.25.1-1 \ + make \ + clang=1:14.0-55.7~deb12u1 \ + libclang-dev=1:14.0-55.7~deb12u1 \ + python3 \ + python3-venv \ + python3-pip \ + libssl-dev \ + pkg-config \ + libsasl2-dev \ + protobuf-compiler \ + && rm -rf /var/lib/apt/lists/* + +ENV PROTOC=/usr/bin/protoc + +WORKDIR /build + +COPY Makefile Cargo.toml Cargo.lock ./ +COPY protocol ./protocol +COPY cli ./cli +COPY src ./src +COPY python ./python +COPY wit ./wit +COPY conf/config.yaml ./ + +RUN python3 -m venv .venv \ + && .venv/bin/pip install --upgrade pip \ + && .venv/bin/pip install componentize-py \ + && .venv/bin/pip install -e python/functionstream-api + +WORKDIR /build/python/functionstream-runtime +RUN make build +WORKDIR /build + +RUN make build-full + +FROM debian:bookworm-slim + +RUN apt-get update && apt-get install -y --no-install-recommends \ + ca-certificates=20230311+deb12u1 \ + libssl3=3.0.18-1~deb12u2 \ + && rm -rf /var/lib/apt/lists/* + +WORKDIR /app + +COPY --from=builder /build/target/release/function-stream bin/ +COPY --from=builder \ + /build/python/functionstream-runtime/target/functionstream-python-runtime.wasm \ + data/cache/python-runner/ +COPY conf/config.yaml conf/ +RUN sed -i 's/127.0.0.1/0.0.0.0/' conf/config.yaml + +EXPOSE 8080 + +ENTRYPOINT ["./bin/function-stream"] diff --git a/LICENSE b/LICENSE index 261eeb9e..f49a4e16 100644 --- a/LICENSE +++ b/LICENSE @@ -198,4 +198,4 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 
implied. See the License for the specific language governing permissions and - limitations under the License. + limitations under the License. \ No newline at end of file diff --git a/Makefile b/Makefile index 8a482924..42f673d5 100644 --- a/Makefile +++ b/Makefile @@ -10,65 +10,233 @@ # See the License for the specific language governing permissions and # limitations under the License. -.PHONY: license +VERSION := $(shell grep '^version' Cargo.toml | head -1 | sed 's/version = "\(.*\)"/\1/' | tr -d ' ') +ARCH := $(shell uname -m) +OS := $(shell uname -s | tr '[:upper:]' '[:lower:]') +BUILD_DIR := target/release +DIST_BASE := dist +FULL_DIR := $(DIST_BASE)/function-stream-$(VERSION) +LITE_DIR := $(DIST_BASE)/function-stream-$(VERSION)-lite +PACKAGE_DIR := packages +PYTHON_WASM_PATH := python/functionstream-runtime/target/functionstream-python-runtime.wasm +PYTHON_WASM_NAME := functionstream-python-runtime.wasm + +.PHONY: help clean clean-dist build build-full build-lite package package-full package-lite package-all test install + +help: + @echo "Function Stream Build System" + @echo "" + @echo "Available targets:" + @echo " build - Build full version (debug)" + @echo " build-full - Build full release version" + @echo " build-lite - Build lite release version (no Python)" + @echo " package-full - Build and package full version (.zip and .tar.gz)" + @echo " package-lite - Build and package lite version (.zip and .tar.gz)" + @echo " package-all - Build and package both versions" + @echo " test - Run tests" + @echo " clean - Clean all (cargo, dist, data, logs, sub-modules)" + @echo " clean-dist - Clean distribution directory only" + @echo "" + @echo "Version: $(VERSION)" + @echo "Architecture: $(ARCH)" + @echo "OS: $(OS)" + +# Sub-modules with their own Makefiles +PYTHON_MODULES := python/functionstream-api python/functionstream-client python/functionstream-runtime + +clean: + @echo "Cleaning build artifacts..." 
+ cargo clean + @rm -rf $(DIST_BASE) + @echo "Cleaning data directory..." + @rm -rf data + @echo "Cleaning logs directory..." + @rm -rf logs + @echo "Cleaning sub-modules..." + @for mod in $(PYTHON_MODULES); do \ + if [ -f "$$mod/Makefile" ]; then \ + echo " Cleaning $$mod..."; \ + $(MAKE) -C $$mod clean 2>/dev/null || true; \ + fi; \ + done + @echo "Clean complete" + +clean-dist: + @echo "Cleaning distribution directory..." + @rm -rf $(DIST_BASE) + @echo "Distribution directory cleaned" + build: - go build -v -o bin/function-stream ./cmd + cargo build -build-example: - tinygo build -o bin/example_basic.wasm -target=wasi ./examples/basic - go build -o bin/example_external_function ./examples/basic +build-full: + @echo "Building full release version..." + @if [ ! -f $(PYTHON_WASM_PATH) ]; then \ + echo "Python WASM file not found. Building it first..."; \ + cd python/functionstream-runtime && $(MAKE) build || { \ + echo "Error: Failed to build Python WASM file"; \ + exit 1; \ + }; \ + fi + cargo build --release --features python + @echo "Full version build complete" -run-example-external-functions: - FS_SOCKET_PATH=/tmp/fs.sock FS_FUNCTION_NAME=fs/external-function ./bin/example_external_function +build-lite: + @echo "Building lite release version (no Python)..." + cargo build --release --no-default-features --features incremental-cache + @echo "Lite version build complete" -lint: - golangci-lint run +prepare-dist-full: + @echo "Preparing full distribution..." 
+ @if [ -d $(FULL_DIR) ]; then \ + echo "Removing existing full distribution directory..."; \ + rm -rf $(FULL_DIR); \ + fi + @mkdir -p $(FULL_DIR)/bin + @mkdir -p $(FULL_DIR)/conf + @mkdir -p $(FULL_DIR)/data/cache/python-runner + @mkdir -p $(FULL_DIR)/logs + @if [ -f $(BUILD_DIR)/function-stream ]; then \ + cp $(BUILD_DIR)/function-stream $(FULL_DIR)/bin/ && \ + chmod +x $(FULL_DIR)/bin/function-stream && \ + echo "Copied function-stream binary"; \ + fi + @if [ -f LICENSE ]; then \ + cp LICENSE $(FULL_DIR)/ && \ + echo "Copied LICENSE file"; \ + else \ + echo "Warning: LICENSE file not found"; \ + fi + @if [ -f README.md ]; then \ + cp README.md $(FULL_DIR)/ && \ + echo "Copied README.md"; \ + fi + @if [ -f $(PYTHON_WASM_PATH) ]; then \ + cp $(PYTHON_WASM_PATH) $(FULL_DIR)/data/cache/python-runner/$(PYTHON_WASM_NAME) && \ + echo "Copied Python WASM file to data/cache/python-runner/"; \ + else \ + echo "Warning: Python WASM file not found at $(PYTHON_WASM_PATH)"; \ + echo "Warning: Full version requires Python WASM file. 
Please build it first:"; \ + echo " cd python/functionstream-runtime && make build"; \ + fi + @if [ -f config.yaml ]; then \ + cp config.yaml $(FULL_DIR)/conf/config.yaml && \ + cp config.yaml $(FULL_DIR)/conf/config.yaml.template && \ + echo "Copied config.yaml and config.yaml.template"; \ + else \ + echo "Warning: config.yaml not found, creating default..."; \ + echo "service:" > $(FULL_DIR)/conf/config.yaml; \ + echo " service_id: \"default-service\"" >> $(FULL_DIR)/conf/config.yaml; \ + echo " service_name: \"function-stream\"" >> $(FULL_DIR)/conf/config.yaml; \ + echo " version: \"$(VERSION)\"" >> $(FULL_DIR)/conf/config.yaml; \ + echo " host: \"127.0.0.1\"" >> $(FULL_DIR)/conf/config.yaml; \ + echo " port: 8080" >> $(FULL_DIR)/conf/config.yaml; \ + echo " debug: false" >> $(FULL_DIR)/conf/config.yaml; \ + echo "logging:" >> $(FULL_DIR)/conf/config.yaml; \ + echo " level: info" >> $(FULL_DIR)/conf/config.yaml; \ + echo " format: json" >> $(FULL_DIR)/conf/config.yaml; \ + echo "python:" >> $(FULL_DIR)/conf/config.yaml; \ + echo " wasm_path: data/cache/python-runner/$(PYTHON_WASM_NAME)" >> $(FULL_DIR)/conf/config.yaml; \ + cp $(FULL_DIR)/conf/config.yaml $(FULL_DIR)/conf/config.yaml.template; \ + fi + @echo "FULL VERSION - $(VERSION)" > $(FULL_DIR)/VERSION.txt + @echo "Build date: $$(date -u +"%Y-%m-%d %H:%M:%S UTC")" >> $(FULL_DIR)/VERSION.txt + @echo "Architecture: $(ARCH)" >> $(FULL_DIR)/VERSION.txt + @echo "OS: $(OS)" >> $(FULL_DIR)/VERSION.txt + @if [ -f $(PYTHON_WASM_PATH) ]; then \ + echo "Python WASM: Included (data/cache/python-runner/$(PYTHON_WASM_NAME))" >> $(FULL_DIR)/VERSION.txt; \ + fi -lint-fix: - golangci-lint run --fix +prepare-dist-lite: + @echo "Preparing lite distribution..." 
+ @if [ -d $(LITE_DIR) ]; then \ + echo "Removing existing lite distribution directory..."; \ + rm -rf $(LITE_DIR); \ + fi + @mkdir -p $(LITE_DIR)/bin + @mkdir -p $(LITE_DIR)/conf + @mkdir -p $(LITE_DIR)/data + @mkdir -p $(LITE_DIR)/logs + @if [ -f $(BUILD_DIR)/function-stream ]; then \ + cp $(BUILD_DIR)/function-stream $(LITE_DIR)/bin/ && \ + chmod +x $(LITE_DIR)/bin/function-stream && \ + echo "Copied function-stream binary"; \ + fi + @if [ -f LICENSE ]; then \ + cp LICENSE $(LITE_DIR)/ && \ + echo "Copied LICENSE file"; \ + else \ + echo "Warning: LICENSE file not found"; \ + fi + @if [ -f README.md ]; then \ + cp README.md $(LITE_DIR)/ && \ + echo "Copied README.md"; \ + fi + @if [ -f config.yaml ]; then \ + cp config.yaml $(LITE_DIR)/conf/config.yaml && \ + cp config.yaml $(LITE_DIR)/conf/config.yaml.template && \ + echo "Copied config.yaml and config.yaml.template"; \ + else \ + echo "Warning: config.yaml not found, creating default..."; \ + echo "service:" > $(LITE_DIR)/conf/config.yaml; \ + echo " service_id: \"default-service\"" >> $(LITE_DIR)/conf/config.yaml; \ + echo " service_name: \"function-stream\"" >> $(LITE_DIR)/conf/config.yaml; \ + echo " version: \"$(VERSION)\"" >> $(LITE_DIR)/conf/config.yaml; \ + echo " host: \"127.0.0.1\"" >> $(LITE_DIR)/conf/config.yaml; \ + echo " port: 8080" >> $(LITE_DIR)/conf/config.yaml; \ + echo " debug: false" >> $(LITE_DIR)/conf/config.yaml; \ + echo "logging:" >> $(LITE_DIR)/conf/config.yaml; \ + echo " level: info" >> $(LITE_DIR)/conf/config.yaml; \ + echo " format: json" >> $(LITE_DIR)/conf/config.yaml; \ + cp $(LITE_DIR)/conf/config.yaml $(LITE_DIR)/conf/config.yaml.template; \ + fi + @echo "LITE VERSION - $(VERSION)" > $(LITE_DIR)/VERSION.txt + @echo "Build date: $$(date -u +"%Y-%m-%d %H:%M:%S UTC")" >> $(LITE_DIR)/VERSION.txt + @echo "Architecture: $(ARCH)" >> $(LITE_DIR)/VERSION.txt + @echo "OS: $(OS)" >> $(LITE_DIR)/VERSION.txt + @echo "Note: Python processor support is disabled" >> $(LITE_DIR)/VERSION.txt 
-build-all: build build-example +package-full: build-full prepare-dist-full + @if [ ! -f $(FULL_DIR)/LICENSE ]; then \ + echo "Error: LICENSE file is required for distribution but not found"; \ + echo "Apache License 2.0 requires LICENSE file to be included in distribution"; \ + exit 1; \ + fi + @if [ ! -f $(FULL_DIR)/data/cache/python-runner/$(PYTHON_WASM_NAME) ]; then \ + echo "Error: Python WASM file is required for full version but not found in distribution"; \ + echo "Please ensure Python WASM is built before packaging"; \ + exit 1; \ + fi + @echo "Packaging full version..." + @mkdir -p $(DIST_BASE)/$(PACKAGE_DIR) + @cd $(DIST_BASE) && \ + zip -r $(PACKAGE_DIR)/function-stream-$(VERSION).zip function-stream-$(VERSION) && \ + tar -czf $(PACKAGE_DIR)/function-stream-$(VERSION).tar.gz function-stream-$(VERSION) + @echo "Full version packages created:" + @ls -lh $(DIST_BASE)/$(PACKAGE_DIR)/function-stream-$(VERSION).* -test: - go test -race ./... -timeout 10m - -bench: - go test -bench=. ./benchmark -timeout 10m - -bench_race: - go test -race -bench=. ./benchmark -timeout 10m - -get-apidocs: - curl -o apidocs.json http://localhost:7300/apidocs - -ADMIN_CLIENT_DIR := admin/client -FILES_TO_REMOVE := go.mod go.sum .travis.yml .openapi-generator-ignore git_push.sh .openapi-generator api test - -gen-rest-client: - -rm -r $(ADMIN_CLIENT_DIR) - mkdir -p $(ADMIN_CLIENT_DIR) - openapi-generator generate -i ./apidocs.json -g go -o $(ADMIN_CLIENT_DIR) \ - --git-user-id functionstream \ - --git-repo-id function-stream/$(ADMIN_CLIENT_DIR) \ - --package-name adminclient \ - --global-property apiDocs,apis,models,supportingFiles - rm -r $(addprefix $(ADMIN_CLIENT_DIR)/, $(FILES_TO_REMOVE)) - -proto: - for PROTO_FILE in $$(find . -name '*.proto'); do \ - echo "generating codes for $$PROTO_FILE"; \ - protoc \ - --go_out=. \ - --go_opt paths=source_relative \ - --plugin protoc-gen-go="${GOPATH}/bin/protoc-gen-go" \ - --go-grpc_out=. 
\ - --go-grpc_opt paths=source_relative \ - --plugin protoc-gen-go-grpc="${GOPATH}/bin/protoc-gen-go-grpc" \ - $$PROTO_FILE; \ - done +package-lite: build-lite prepare-dist-lite + @if [ ! -f $(LITE_DIR)/LICENSE ]; then \ + echo "Error: LICENSE file is required for distribution but not found"; \ + echo "Apache License 2.0 requires LICENSE file to be included in distribution"; \ + exit 1; \ + fi + @echo "Packaging lite version..." + @mkdir -p $(DIST_BASE)/$(PACKAGE_DIR) + @cd $(DIST_BASE) && \ + zip -r $(PACKAGE_DIR)/function-stream-$(VERSION)-lite.zip function-stream-$(VERSION)-lite && \ + tar -czf $(PACKAGE_DIR)/function-stream-$(VERSION)-lite.tar.gz function-stream-$(VERSION)-lite + @echo "Lite version packages created:" + @ls -lh $(DIST_BASE)/$(PACKAGE_DIR)/function-stream-$(VERSION)-lite.* -license: - ./license-checker/license-checker.sh +package-all: clean-dist package-full package-lite + @echo "" + @echo "All packages created:" + @ls -lh $(DIST_BASE)/$(PACKAGE_DIR)/ + +test: + cargo test -gen-changelog: - .chglog/gen-chg-log.sh +install: build-full prepare-dist-full + @echo "Installation complete" + @echo "Distribution directory: $(FULL_DIR)" diff --git a/README.md b/README.md index dd859c0e..0e80505c 100644 --- a/README.md +++ b/README.md @@ -1,171 +1,158 @@ - -# Function Stream +## Key Features + +* **Event-Driven WASM Runtime**: Executes polyglot functions (Go, Python, Rust) with near-native performance and sandboxed isolation. +* **Durable State Management**: Built-in support for RocksDB-backed state stores for stateful stream processing. +* **SQL-Powered CLI**: Interactive REPL for job management and stream inspection using SQL-like commands. -Function stream is an event-streaming function platform based on Apache Pulsar and WebAssembly. It enables efficient and -scalable processing of data streams by leveraging the power of WebAssembly. 
Function Stream provides seamless -integration with Apache Pulsar, allowing users to take full advantage of its robust messaging capabilities. - -## Features - -1. **Support for Multiple Programming Languages**: Function Stream aims to provide the capability to write code using - multiple programming languages. This allows developers to use their preferred language and harness its specific - strengths while working with Function Stream. -2. **High Performance and Throughput**: Function Stream is designed to deliver high performance and handle substantial - throughput. It strives to optimize resource utilization and minimize latency, enabling efficient execution of code - and processing of data. -3. **Isolated Environment**: Function Stream offers an isolated environment for executing code. This ensures that each - function runs independently, without interference from other functions or external factors. The isolation enhances - the security, reliability, and predictability of code execution. -4. **Scalability and Fault Tolerance**: Function Stream focuses on scalability by offering the ability to effortlessly - scale up or down based on workload demands. Additionally, it emphasizes fault tolerance, ensuring that system - failures or errors do not disrupt the overall functioning of the platform. -4. **Support for Complex Data Schema**: Function Stream acknowledges the need to handle diverse data types and formats. - It provides support for complex data schema, including bytes data and JSON format data, among others. This - versatility enables developers to process and manipulate data efficiently within the platform. -6. **Stateful/Stateless Computing**: Function Stream caters to both stateful and stateless computing requirements. It - accommodates scenarios where functions require maintaining state between invocations as well as situations where a - stateless approach is more suitable. 
This flexibility allows developers to implement the desired architectural - patterns. -7. **Cross-Architecture Platform Execution**: Function Stream aims to be a cross-architecture platform capable of - executing code across different hardware architectures seamlessly. It provides compatibility and portability, - allowing developers to run their code on various platforms without concerns about underlying hardware dependencies. - -## Architecture and Components - -Function Stream is composed of three main components: the WebAssembly runtime engine, the Pulsar client, and the -Function Stream service. The following figure shows the overview of the Function Stream architecture. -![Architecture](docs/images/arch.png) - -The **WebAssembly runtime engine** is responsible for executing the WebAssembly modules that implement the stream -processing logic. The runtime engine supports an interface for the underlying wasm runtime library. We use [wazero -](https://github.com/tetratelabs/wazero) as the -WebAssembly runtime library, as they are both fast and lightweight. The WebAssembly runtime -engine communicates with the Pulsar client through standard IO and file systems. - -**The Pulsar client** is responsible for consuming and publishing the messages from and to the Apache Pulsar cluster. We -use [Pulsar Go client](https://github.com/apache/pulsar-client-go), which is a pure go implementation of the pulsar -client library, to interact with the Pulsar brokers. The Pulsar client handles the data schema, the message metadata, -and the processing guarantees of the messages. - -**The Function Stream service** is responsible for managing the lifecycle and coordination of the WebAssembly instances. 
- -## Directory Structure - -The Function Stream project is organized as follows: -```plaintext -├── LICENSE # The license for Function Stream -├── Makefile # Contains build automation and commands -├── README.md # README file for the project -├── benchmark # Contains benchmarking tools or results -├── bin # Contains compiled binary files -├── cmd # Contains the command line executable source files -├── common # Contains common utilities and libraries used across the project -├── docs # Documentation for the project -├── examples # Example configurations, scripts, and other reference materials -├── go.mod # Defines the module's module path and its dependency requirements -├── go.sum # Contains the expected cryptographic checksums of the content of specific module versions -├── fs # Core library files for Function Stream -├── license-checker # Tools related to checking license compliance -├── openapi.yaml # API definition file -├── perf # Performance testing scripts -├── restclient # REST client library -├── server # Server-side application source files -└── tests # Contains test scripts and test data +## Repository Layout + +``` +function-stream/ +├── src/ # Core runtime, coordinator, server, config, SQL parser +├── protocol/ # Protocol Buffers definitions and generated gRPC code +├── cli/cli/ # SQL REPL client +├── conf/ # Default runtime configuration +├── examples/ # Sample processors and integration examples +├── python/ # Python API, client, and runtime WASM generator +├── Makefile # Build and packaging automation +└── Cargo.toml # Workspace manifest ``` -## Building Instructions +## Prerequisites -To compile Function Stream, use this command: +- Rust toolchain (recommend `rustup` with stable >= 1.77) +- `protoc` (Protocol Buffers compiler) for generating gRPC bindings +- Build tooling for `rdkafka`: `cmake`, `pkg-config`, and OpenSSL headers +- Optional (for Python processors and full package): + - Python 3.9+ with `venv` + - `componentize-py` (installed via 
`make install-deps` inside `python/functionstream-runtime`) -```shell -make build-all +## Build From Source + +Clone the repository and install dependencies: + +``` +git clone https://github.com//function-stream.git +cd function-stream +cargo fetch ``` -This creates the function-stream binary program and example wasm files in the `bin` directory, -like `bin/example_basic.wasm`. +Build targets: + +- Debug build (fast iteration): + ``` + cargo build + ``` +- Release build with Python support (default features): + ``` + cargo build --release + ``` +- Release build without Python (lite): + ``` + cargo build --release --no-default-features --features incremental-cache + ``` + +To regenerate Protocol Buffers, rerun the build; `tonic-build` automatically recompiles when `protocol/proto/function_stream.proto` changes. -## Running Instructions +## Run Locally -You have two ways to start the function stream server. +### Prepare configuration -Use this command to start the function stream server: +1. Review `conf/config.yaml` and adjust service host/port, logging, state storage, and Python runtime settings as needed. +2. Optionally point `FUNCTION_STREAM_CONF` to a custom configuration file or directory: + ``` + export FUNCTION_STREAM_CONF=/path/to/config.yaml + ``` +3. Ensure `data/` and `logs/` directories are writable (they are created automatically on startup). -```shell -bin/function-stream server +### Start the control plane + +``` +cargo run --release --bin function-stream ``` -### Creating a Function +The server logs appear under `logs/app.log` (default JSON format). Stop the service with `Ctrl+C`. -We'll use `example_basic.wasm` as an example wasm file. This function increases the money by 1. See the -code [here](examples/basic/main.go). 
+### Use the SQL CLI -After starting the server, create a function with this command: +Run the interactive CLI from another terminal to issue SQL statements: -```shell -bin/function-stream client create -n example -a "bin/example_basic.wasm" -i example-input -o example-output -r 1 +``` +cargo run -p function-stream-cli -- --ip 127.0.0.1 --port 8080 ``` -This creates a function named `example` using `example_basic.wasm`. It takes messages from `example-input`, produces -messages to `example-output`, and runs with 1 replica. +The CLI connects to the gRPC endpoint exposed by the server. -### Consuming a Message from the Function Output +### Try sample processors -After creating the function, consume a message from the output topic with this command: +- Python processor example: + ``` + cd examples/python-processor + python main.py + ``` +- Kafka integration tests and Go processor examples are under `examples/`. -```shell -bin/function-stream client consume -n example-output -``` +## Packaging -### Producing a Message to the Function Input +Packaging is orchestrated by the top-level `Makefile`. Outputs are placed in `dist/`. -In a new terminal, produce a message to the input topic with this command: +### Full distribution (includes Python runtime) -```shell -bin/function-stream client produce -n example-input -c '{"name":"rbt","money":2}' -``` +1. Create the virtual environment once: + ``` + python3 -m venv .venv + source .venv/bin/activate + make -C python/functionstream-runtime install-deps build + deactivate + ``` +2. Build and package: + ``` + make package-full + ``` -You'll see this log: +Artifacts: +- `dist/function-stream-/` directory containing binaries, config, logs/data skeletons, README, and Python WASM runtime. +- `dist/packages/function-stream-.zip` and `.tar.gz`. + +### Lite distribution (Rust-only) ``` -Event produced +make package-lite ``` -### Checking the Output +Artifacts: +- `dist/function-stream--lite/` directory with binaries and configs. 
+- `dist/packages/function-stream--lite.zip` and `.tar.gz`. -In the terminal where you consume the message from the output topic, you'll see this log: +### Package all variants ``` -"{\"name\":\"rbt\",\"money\":3,\"expected\":0}" +make package-all ``` -### Deleting the Function +The script cleans previous `dist/` contents, produces both full and lite packages, and lists generated archives. + +## Testing -After testing, delete the function with this command: +Run the Rust test suite: -```shell -bin/function-stream client delete -n example ``` +cargo test +``` + +Python components expose their own `Makefile` targets (for example, `make -C python/functionstream-runtime test` if defined). -## Contributing +## Environment Variables -We're happy to receive contributions from the community. If you find a bug or have a feature request, please open an -issue or submit a pull request. +- `FUNCTION_STREAM_HOME`: Overrides the project root for resolving data/log directories. +- `FUNCTION_STREAM_CONF`: Points to a configuration file or directory containing `config.yaml`. ## License -This project is licensed under the [Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0). \ No newline at end of file +Licensed under the Apache License, Version 2.0. See `LICENSE` for details. 
diff --git a/admin/client/.gitignore b/admin/client/.gitignore deleted file mode 100644 index daf913b1..00000000 --- a/admin/client/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/admin/client/README.md b/admin/client/README.md deleted file mode 100644 index e67832d6..00000000 --- a/admin/client/README.md +++ /dev/null @@ -1,127 +0,0 @@ -# Go API client for adminclient - -Manage Function Stream Resources - -## Overview -This API client was generated by the [OpenAPI Generator](https://openapi-generator.tech) project. By using the [OpenAPI-spec](https://www.openapis.org/) from a remote server, you can easily generate an API client. - -- API version: 1.0.0 -- Package version: 1.0.0 -- Generator version: 7.6.0 -- Build package: org.openapitools.codegen.languages.GoClientCodegen -For more information, please visit [https://github.com/FunctionStream](https://github.com/FunctionStream) - -## Installation - -Install the following dependencies: - -```sh -go get github.com/stretchr/testify/assert -go get golang.org/x/net/context -``` - -Put the package under your project folder and add the following in import: - -```go -import adminclient "github.com/functionstream/function-stream/admin/client" -``` - -To use a proxy, set the environment variable `HTTP_PROXY`: - -```go -os.Setenv("HTTP_PROXY", "http://proxy_name:proxy_port") -``` - -## Configuration of Server URL - -Default configuration comes with `Servers` field that contains server objects as defined in the OpenAPI specification. - -### Select Server Configuration - -For using other server than the one defined on index 0 set context value `adminclient.ContextServerIndex` of type `int`. 
- -```go -ctx := context.WithValue(context.Background(), adminclient.ContextServerIndex, 1) -``` - -### Templated Server URL - -Templated server URL is formatted using default variables from configuration or from context value `adminclient.ContextServerVariables` of type `map[string]string`. - -```go -ctx := context.WithValue(context.Background(), adminclient.ContextServerVariables, map[string]string{ - "basePath": "v2", -}) -``` - -Note, enum values are always validated and all unused variables are silently ignored. - -### URLs Configuration per Operation - -Each operation can use different server URL defined using `OperationServers` map in the `Configuration`. -An operation is uniquely identified by `"{classname}Service.{nickname}"` string. -Similar rules for overriding default operation server index and variables applies by using `adminclient.ContextOperationServerIndices` and `adminclient.ContextOperationServerVariables` context maps. - -```go -ctx := context.WithValue(context.Background(), adminclient.ContextOperationServerIndices, map[string]int{ - "{classname}Service.{nickname}": 2, -}) -ctx = context.WithValue(context.Background(), adminclient.ContextOperationServerVariables, map[string]map[string]string{ - "{classname}Service.{nickname}": { - "port": "8443", - }, -}) -``` - -## Documentation for API Endpoints - -All URIs are relative to *http://localhost:7300* - -Class | Method | HTTP request | Description ------------- | ------------- | ------------- | ------------- -*FunctionAPI* | [**CreateFunction**](docs/FunctionAPI.md#createfunction) | **Post** /api/v1/function | create a function -*FunctionAPI* | [**DeleteFunction**](docs/FunctionAPI.md#deletefunction) | **Delete** /api/v1/function/{name} | delete a function -*FunctionAPI* | [**DeleteNamespacedFunction**](docs/FunctionAPI.md#deletenamespacedfunction) | **Delete** /api/v1/function/{namespace}/{name} | delete a namespaced function -*FunctionAPI* | 
[**GetAllFunctions**](docs/FunctionAPI.md#getallfunctions) | **Get** /api/v1/function | get all functions -*FunctionStoreAPI* | [**ReloadFunctions**](docs/FunctionStoreAPI.md#reloadfunctions) | **Get** /api/v1/function-store/reload | reload functions from the function store -*HttpTubeAPI* | [**TriggerHttpTubeEndpoint**](docs/HttpTubeAPI.md#triggerhttptubeendpoint) | **Post** /api/v1/http-tube/{endpoint} | trigger the http tube endpoint -*StateAPI* | [**GetState**](docs/StateAPI.md#getstate) | **Get** /api/v1/state/{key} | get a state -*StateAPI* | [**SetState**](docs/StateAPI.md#setstate) | **Post** /api/v1/state/{key} | set a state -*StatusAPI* | [**GetStatus**](docs/StatusAPI.md#getstatus) | **Get** /api/v1/status | Get the status of the Function Stream -*TubeAPI* | [**ConsumeMessage**](docs/TubeAPI.md#consumemessage) | **Get** /api/v1/consume/{name} | consume a message -*TubeAPI* | [**ProduceMessage**](docs/TubeAPI.md#producemessage) | **Post** /api/v1/produce/{name} | produce a message - - -## Documentation For Models - - - [ModelFunction](docs/ModelFunction.md) - - [ModelRuntimeConfig](docs/ModelRuntimeConfig.md) - - [ModelTubeConfig](docs/ModelTubeConfig.md) - - [RestfulspecSchemaType](docs/RestfulspecSchemaType.md) - - -## Documentation For Authorization - -Endpoints do not require authorization. - - -## Documentation for Utility Methods - -Due to the fact that model structure members are all pointers, this package contains -a number of utility functions to easily obtain pointers to values of basic types. 
-Each of these functions takes a value of the given basic type and returns a pointer to it: - -* `PtrBool` -* `PtrInt` -* `PtrInt32` -* `PtrInt64` -* `PtrFloat` -* `PtrFloat32` -* `PtrFloat64` -* `PtrString` -* `PtrTime` - -## Author - - - diff --git a/admin/client/api_function.go b/admin/client/api_function.go deleted file mode 100644 index 9b483cb2..00000000 --- a/admin/client/api_function.go +++ /dev/null @@ -1,402 +0,0 @@ -/* -Function Stream Service - -Manage Function Stream Resources - -API version: 1.0.0 -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package adminclient - -import ( - "bytes" - "context" - "io" - "net/http" - "net/url" - "strings" -) - -// FunctionAPIService FunctionAPI service -type FunctionAPIService service - -type ApiCreateFunctionRequest struct { - ctx context.Context - ApiService *FunctionAPIService - body *ModelFunction -} - -func (r ApiCreateFunctionRequest) Body(body ModelFunction) ApiCreateFunctionRequest { - r.body = &body - return r -} - -func (r ApiCreateFunctionRequest) Execute() (*http.Response, error) { - return r.ApiService.CreateFunctionExecute(r) -} - -/* -CreateFunction create a function - - @param ctx context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). 
- @return ApiCreateFunctionRequest -*/ -func (a *FunctionAPIService) CreateFunction(ctx context.Context) ApiCreateFunctionRequest { - return ApiCreateFunctionRequest{ - ApiService: a, - ctx: ctx, - } -} - -// Execute executes the request -func (a *FunctionAPIService) CreateFunctionExecute(r ApiCreateFunctionRequest) (*http.Response, error) { - var ( - localVarHTTPMethod = http.MethodPost - localVarPostBody interface{} - formFiles []formFile - ) - - localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "FunctionAPIService.CreateFunction") - if err != nil { - return nil, &GenericOpenAPIError{error: err.Error()} - } - - localVarPath := localBasePath + "/api/v1/function" - - localVarHeaderParams := make(map[string]string) - localVarQueryParams := url.Values{} - localVarFormParams := url.Values{} - if r.body == nil { - return nil, reportError("body is required and must be specified") - } - - // to determine the Content-Type header - localVarHTTPContentTypes := []string{"application/json"} - - // set Content-Type header - localVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes) - if localVarHTTPContentType != "" { - localVarHeaderParams["Content-Type"] = localVarHTTPContentType - } - - // to determine the Accept header - localVarHTTPHeaderAccepts := []string{} - - // set Accept header - localVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts) - if localVarHTTPHeaderAccept != "" { - localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept - } - // body params - localVarPostBody = r.body - req, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, formFiles) - if err != nil { - return nil, err - } - - localVarHTTPResponse, err := a.client.callAPI(req) - if err != nil || localVarHTTPResponse == nil { - return localVarHTTPResponse, err - } - - localVarBody, err := io.ReadAll(localVarHTTPResponse.Body) - 
localVarHTTPResponse.Body.Close() - localVarHTTPResponse.Body = io.NopCloser(bytes.NewBuffer(localVarBody)) - if err != nil { - return localVarHTTPResponse, err - } - - if localVarHTTPResponse.StatusCode >= 300 { - newErr := &GenericOpenAPIError{ - body: localVarBody, - error: localVarHTTPResponse.Status, - } - return localVarHTTPResponse, newErr - } - - return localVarHTTPResponse, nil -} - -type ApiDeleteFunctionRequest struct { - ctx context.Context - ApiService *FunctionAPIService - name string -} - -func (r ApiDeleteFunctionRequest) Execute() (*http.Response, error) { - return r.ApiService.DeleteFunctionExecute(r) -} - -/* -DeleteFunction delete a function - - @param ctx context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). - @param name name of the function - @return ApiDeleteFunctionRequest -*/ -func (a *FunctionAPIService) DeleteFunction(ctx context.Context, name string) ApiDeleteFunctionRequest { - return ApiDeleteFunctionRequest{ - ApiService: a, - ctx: ctx, - name: name, - } -} - -// Execute executes the request -func (a *FunctionAPIService) DeleteFunctionExecute(r ApiDeleteFunctionRequest) (*http.Response, error) { - var ( - localVarHTTPMethod = http.MethodDelete - localVarPostBody interface{} - formFiles []formFile - ) - - localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "FunctionAPIService.DeleteFunction") - if err != nil { - return nil, &GenericOpenAPIError{error: err.Error()} - } - - localVarPath := localBasePath + "/api/v1/function/{name}" - localVarPath = strings.Replace(localVarPath, "{"+"name"+"}", url.PathEscape(parameterValueToString(r.name, "name")), -1) - - localVarHeaderParams := make(map[string]string) - localVarQueryParams := url.Values{} - localVarFormParams := url.Values{} - - // to determine the Content-Type header - localVarHTTPContentTypes := []string{} - - // set Content-Type header - localVarHTTPContentType := 
selectHeaderContentType(localVarHTTPContentTypes) - if localVarHTTPContentType != "" { - localVarHeaderParams["Content-Type"] = localVarHTTPContentType - } - - // to determine the Accept header - localVarHTTPHeaderAccepts := []string{} - - // set Accept header - localVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts) - if localVarHTTPHeaderAccept != "" { - localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept - } - req, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, formFiles) - if err != nil { - return nil, err - } - - localVarHTTPResponse, err := a.client.callAPI(req) - if err != nil || localVarHTTPResponse == nil { - return localVarHTTPResponse, err - } - - localVarBody, err := io.ReadAll(localVarHTTPResponse.Body) - localVarHTTPResponse.Body.Close() - localVarHTTPResponse.Body = io.NopCloser(bytes.NewBuffer(localVarBody)) - if err != nil { - return localVarHTTPResponse, err - } - - if localVarHTTPResponse.StatusCode >= 300 { - newErr := &GenericOpenAPIError{ - body: localVarBody, - error: localVarHTTPResponse.Status, - } - return localVarHTTPResponse, newErr - } - - return localVarHTTPResponse, nil -} - -type ApiDeleteNamespacedFunctionRequest struct { - ctx context.Context - ApiService *FunctionAPIService - name string - namespace string -} - -func (r ApiDeleteNamespacedFunctionRequest) Execute() (*http.Response, error) { - return r.ApiService.DeleteNamespacedFunctionExecute(r) -} - -/* -DeleteNamespacedFunction delete a namespaced function - - @param ctx context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). 
- @param name name of the function - @param namespace namespace of the function - @return ApiDeleteNamespacedFunctionRequest -*/ -func (a *FunctionAPIService) DeleteNamespacedFunction(ctx context.Context, name string, namespace string) ApiDeleteNamespacedFunctionRequest { - return ApiDeleteNamespacedFunctionRequest{ - ApiService: a, - ctx: ctx, - name: name, - namespace: namespace, - } -} - -// Execute executes the request -func (a *FunctionAPIService) DeleteNamespacedFunctionExecute(r ApiDeleteNamespacedFunctionRequest) (*http.Response, error) { - var ( - localVarHTTPMethod = http.MethodDelete - localVarPostBody interface{} - formFiles []formFile - ) - - localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "FunctionAPIService.DeleteNamespacedFunction") - if err != nil { - return nil, &GenericOpenAPIError{error: err.Error()} - } - - localVarPath := localBasePath + "/api/v1/function/{namespace}/{name}" - localVarPath = strings.Replace(localVarPath, "{"+"name"+"}", url.PathEscape(parameterValueToString(r.name, "name")), -1) - localVarPath = strings.Replace(localVarPath, "{"+"namespace"+"}", url.PathEscape(parameterValueToString(r.namespace, "namespace")), -1) - - localVarHeaderParams := make(map[string]string) - localVarQueryParams := url.Values{} - localVarFormParams := url.Values{} - - // to determine the Content-Type header - localVarHTTPContentTypes := []string{} - - // set Content-Type header - localVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes) - if localVarHTTPContentType != "" { - localVarHeaderParams["Content-Type"] = localVarHTTPContentType - } - - // to determine the Accept header - localVarHTTPHeaderAccepts := []string{} - - // set Accept header - localVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts) - if localVarHTTPHeaderAccept != "" { - localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept - } - req, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, 
localVarHeaderParams, localVarQueryParams, localVarFormParams, formFiles) - if err != nil { - return nil, err - } - - localVarHTTPResponse, err := a.client.callAPI(req) - if err != nil || localVarHTTPResponse == nil { - return localVarHTTPResponse, err - } - - localVarBody, err := io.ReadAll(localVarHTTPResponse.Body) - localVarHTTPResponse.Body.Close() - localVarHTTPResponse.Body = io.NopCloser(bytes.NewBuffer(localVarBody)) - if err != nil { - return localVarHTTPResponse, err - } - - if localVarHTTPResponse.StatusCode >= 300 { - newErr := &GenericOpenAPIError{ - body: localVarBody, - error: localVarHTTPResponse.Status, - } - return localVarHTTPResponse, newErr - } - - return localVarHTTPResponse, nil -} - -type ApiGetAllFunctionsRequest struct { - ctx context.Context - ApiService *FunctionAPIService -} - -func (r ApiGetAllFunctionsRequest) Execute() ([]string, *http.Response, error) { - return r.ApiService.GetAllFunctionsExecute(r) -} - -/* -GetAllFunctions get all functions - - @param ctx context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). 
- @return ApiGetAllFunctionsRequest -*/ -func (a *FunctionAPIService) GetAllFunctions(ctx context.Context) ApiGetAllFunctionsRequest { - return ApiGetAllFunctionsRequest{ - ApiService: a, - ctx: ctx, - } -} - -// Execute executes the request -// -// @return []string -func (a *FunctionAPIService) GetAllFunctionsExecute(r ApiGetAllFunctionsRequest) ([]string, *http.Response, error) { - var ( - localVarHTTPMethod = http.MethodGet - localVarPostBody interface{} - formFiles []formFile - localVarReturnValue []string - ) - - localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "FunctionAPIService.GetAllFunctions") - if err != nil { - return localVarReturnValue, nil, &GenericOpenAPIError{error: err.Error()} - } - - localVarPath := localBasePath + "/api/v1/function" - - localVarHeaderParams := make(map[string]string) - localVarQueryParams := url.Values{} - localVarFormParams := url.Values{} - - // to determine the Content-Type header - localVarHTTPContentTypes := []string{} - - // set Content-Type header - localVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes) - if localVarHTTPContentType != "" { - localVarHeaderParams["Content-Type"] = localVarHTTPContentType - } - - // to determine the Accept header - localVarHTTPHeaderAccepts := []string{"application/json"} - - // set Accept header - localVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts) - if localVarHTTPHeaderAccept != "" { - localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept - } - req, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, formFiles) - if err != nil { - return localVarReturnValue, nil, err - } - - localVarHTTPResponse, err := a.client.callAPI(req) - if err != nil || localVarHTTPResponse == nil { - return localVarReturnValue, localVarHTTPResponse, err - } - - localVarBody, err := io.ReadAll(localVarHTTPResponse.Body) - 
localVarHTTPResponse.Body.Close() - localVarHTTPResponse.Body = io.NopCloser(bytes.NewBuffer(localVarBody)) - if err != nil { - return localVarReturnValue, localVarHTTPResponse, err - } - - if localVarHTTPResponse.StatusCode >= 300 { - newErr := &GenericOpenAPIError{ - body: localVarBody, - error: localVarHTTPResponse.Status, - } - return localVarReturnValue, localVarHTTPResponse, newErr - } - - err = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get("Content-Type")) - if err != nil { - newErr := &GenericOpenAPIError{ - body: localVarBody, - error: err.Error(), - } - return localVarReturnValue, localVarHTTPResponse, newErr - } - - return localVarReturnValue, localVarHTTPResponse, nil -} diff --git a/admin/client/api_function_store.go b/admin/client/api_function_store.go deleted file mode 100644 index 944ae039..00000000 --- a/admin/client/api_function_store.go +++ /dev/null @@ -1,108 +0,0 @@ -/* -Function Stream Service - -Manage Function Stream Resources - -API version: 1.0.0 -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package adminclient - -import ( - "bytes" - "context" - "io" - "net/http" - "net/url" -) - -// FunctionStoreAPIService FunctionStoreAPI service -type FunctionStoreAPIService service - -type ApiReloadFunctionsRequest struct { - ctx context.Context - ApiService *FunctionStoreAPIService -} - -func (r ApiReloadFunctionsRequest) Execute() (*http.Response, error) { - return r.ApiService.ReloadFunctionsExecute(r) -} - -/* -ReloadFunctions reload functions from the function store - - @param ctx context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). 
- @return ApiReloadFunctionsRequest -*/ -func (a *FunctionStoreAPIService) ReloadFunctions(ctx context.Context) ApiReloadFunctionsRequest { - return ApiReloadFunctionsRequest{ - ApiService: a, - ctx: ctx, - } -} - -// Execute executes the request -func (a *FunctionStoreAPIService) ReloadFunctionsExecute(r ApiReloadFunctionsRequest) (*http.Response, error) { - var ( - localVarHTTPMethod = http.MethodGet - localVarPostBody interface{} - formFiles []formFile - ) - - localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "FunctionStoreAPIService.ReloadFunctions") - if err != nil { - return nil, &GenericOpenAPIError{error: err.Error()} - } - - localVarPath := localBasePath + "/api/v1/function-store/reload" - - localVarHeaderParams := make(map[string]string) - localVarQueryParams := url.Values{} - localVarFormParams := url.Values{} - - // to determine the Content-Type header - localVarHTTPContentTypes := []string{} - - // set Content-Type header - localVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes) - if localVarHTTPContentType != "" { - localVarHeaderParams["Content-Type"] = localVarHTTPContentType - } - - // to determine the Accept header - localVarHTTPHeaderAccepts := []string{} - - // set Accept header - localVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts) - if localVarHTTPHeaderAccept != "" { - localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept - } - req, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, formFiles) - if err != nil { - return nil, err - } - - localVarHTTPResponse, err := a.client.callAPI(req) - if err != nil || localVarHTTPResponse == nil { - return localVarHTTPResponse, err - } - - localVarBody, err := io.ReadAll(localVarHTTPResponse.Body) - localVarHTTPResponse.Body.Close() - localVarHTTPResponse.Body = io.NopCloser(bytes.NewBuffer(localVarBody)) - if err != nil { - return 
localVarHTTPResponse, err - } - - if localVarHTTPResponse.StatusCode >= 300 { - newErr := &GenericOpenAPIError{ - body: localVarBody, - error: localVarHTTPResponse.Status, - } - return localVarHTTPResponse, newErr - } - - return localVarHTTPResponse, nil -} diff --git a/admin/client/api_http_tube.go b/admin/client/api_http_tube.go deleted file mode 100644 index 4e8752ee..00000000 --- a/admin/client/api_http_tube.go +++ /dev/null @@ -1,124 +0,0 @@ -/* -Function Stream Service - -Manage Function Stream Resources - -API version: 1.0.0 -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package adminclient - -import ( - "bytes" - "context" - "io" - "net/http" - "net/url" - "strings" -) - -// HttpTubeAPIService HttpTubeAPI service -type HttpTubeAPIService service - -type ApiTriggerHttpTubeEndpointRequest struct { - ctx context.Context - ApiService *HttpTubeAPIService - endpoint string - body *string -} - -func (r ApiTriggerHttpTubeEndpointRequest) Body(body string) ApiTriggerHttpTubeEndpointRequest { - r.body = &body - return r -} - -func (r ApiTriggerHttpTubeEndpointRequest) Execute() (*http.Response, error) { - return r.ApiService.TriggerHttpTubeEndpointExecute(r) -} - -/* -TriggerHttpTubeEndpoint trigger the http tube endpoint - - @param ctx context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). 
- @param endpoint Endpoint - @return ApiTriggerHttpTubeEndpointRequest -*/ -func (a *HttpTubeAPIService) TriggerHttpTubeEndpoint(ctx context.Context, endpoint string) ApiTriggerHttpTubeEndpointRequest { - return ApiTriggerHttpTubeEndpointRequest{ - ApiService: a, - ctx: ctx, - endpoint: endpoint, - } -} - -// Execute executes the request -func (a *HttpTubeAPIService) TriggerHttpTubeEndpointExecute(r ApiTriggerHttpTubeEndpointRequest) (*http.Response, error) { - var ( - localVarHTTPMethod = http.MethodPost - localVarPostBody interface{} - formFiles []formFile - ) - - localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "HttpTubeAPIService.TriggerHttpTubeEndpoint") - if err != nil { - return nil, &GenericOpenAPIError{error: err.Error()} - } - - localVarPath := localBasePath + "/api/v1/http-tube/{endpoint}" - localVarPath = strings.Replace(localVarPath, "{"+"endpoint"+"}", url.PathEscape(parameterValueToString(r.endpoint, "endpoint")), -1) - - localVarHeaderParams := make(map[string]string) - localVarQueryParams := url.Values{} - localVarFormParams := url.Values{} - if r.body == nil { - return nil, reportError("body is required and must be specified") - } - - // to determine the Content-Type header - localVarHTTPContentTypes := []string{"application/json"} - - // set Content-Type header - localVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes) - if localVarHTTPContentType != "" { - localVarHeaderParams["Content-Type"] = localVarHTTPContentType - } - - // to determine the Accept header - localVarHTTPHeaderAccepts := []string{} - - // set Accept header - localVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts) - if localVarHTTPHeaderAccept != "" { - localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept - } - // body params - localVarPostBody = r.body - req, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, formFiles) 
- if err != nil { - return nil, err - } - - localVarHTTPResponse, err := a.client.callAPI(req) - if err != nil || localVarHTTPResponse == nil { - return localVarHTTPResponse, err - } - - localVarBody, err := io.ReadAll(localVarHTTPResponse.Body) - localVarHTTPResponse.Body.Close() - localVarHTTPResponse.Body = io.NopCloser(bytes.NewBuffer(localVarBody)) - if err != nil { - return localVarHTTPResponse, err - } - - if localVarHTTPResponse.StatusCode >= 300 { - newErr := &GenericOpenAPIError{ - body: localVarBody, - error: localVarHTTPResponse.Status, - } - return localVarHTTPResponse, newErr - } - - return localVarHTTPResponse, nil -} diff --git a/admin/client/api_state.go b/admin/client/api_state.go deleted file mode 100644 index 09857746..00000000 --- a/admin/client/api_state.go +++ /dev/null @@ -1,226 +0,0 @@ -/* -Function Stream Service - -Manage Function Stream Resources - -API version: 1.0.0 -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package adminclient - -import ( - "bytes" - "context" - "io" - "net/http" - "net/url" - "strings" -) - -// StateAPIService StateAPI service -type StateAPIService service - -type ApiGetStateRequest struct { - ctx context.Context - ApiService *StateAPIService - key string -} - -func (r ApiGetStateRequest) Execute() (string, *http.Response, error) { - return r.ApiService.GetStateExecute(r) -} - -/* -GetState get a state - - @param ctx context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). 
- @param key state key - @return ApiGetStateRequest -*/ -func (a *StateAPIService) GetState(ctx context.Context, key string) ApiGetStateRequest { - return ApiGetStateRequest{ - ApiService: a, - ctx: ctx, - key: key, - } -} - -// Execute executes the request -// -// @return string -func (a *StateAPIService) GetStateExecute(r ApiGetStateRequest) (string, *http.Response, error) { - var ( - localVarHTTPMethod = http.MethodGet - localVarPostBody interface{} - formFiles []formFile - localVarReturnValue string - ) - - localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "StateAPIService.GetState") - if err != nil { - return localVarReturnValue, nil, &GenericOpenAPIError{error: err.Error()} - } - - localVarPath := localBasePath + "/api/v1/state/{key}" - localVarPath = strings.Replace(localVarPath, "{"+"key"+"}", url.PathEscape(parameterValueToString(r.key, "key")), -1) - - localVarHeaderParams := make(map[string]string) - localVarQueryParams := url.Values{} - localVarFormParams := url.Values{} - - // to determine the Content-Type header - localVarHTTPContentTypes := []string{} - - // set Content-Type header - localVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes) - if localVarHTTPContentType != "" { - localVarHeaderParams["Content-Type"] = localVarHTTPContentType - } - - // to determine the Accept header - localVarHTTPHeaderAccepts := []string{"*/*"} - - // set Accept header - localVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts) - if localVarHTTPHeaderAccept != "" { - localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept - } - req, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, formFiles) - if err != nil { - return localVarReturnValue, nil, err - } - - localVarHTTPResponse, err := a.client.callAPI(req) - if err != nil || localVarHTTPResponse == nil { - return localVarReturnValue, localVarHTTPResponse, err - } - - 
localVarBody, err := io.ReadAll(localVarHTTPResponse.Body) - localVarHTTPResponse.Body.Close() - localVarHTTPResponse.Body = io.NopCloser(bytes.NewBuffer(localVarBody)) - if err != nil { - return localVarReturnValue, localVarHTTPResponse, err - } - - if localVarHTTPResponse.StatusCode >= 300 { - newErr := &GenericOpenAPIError{ - body: localVarBody, - error: localVarHTTPResponse.Status, - } - return localVarReturnValue, localVarHTTPResponse, newErr - } - - err = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get("Content-Type")) - if err != nil { - newErr := &GenericOpenAPIError{ - body: localVarBody, - error: err.Error(), - } - return localVarReturnValue, localVarHTTPResponse, newErr - } - - return localVarReturnValue, localVarHTTPResponse, nil -} - -type ApiSetStateRequest struct { - ctx context.Context - ApiService *StateAPIService - key string - body *string -} - -func (r ApiSetStateRequest) Body(body string) ApiSetStateRequest { - r.body = &body - return r -} - -func (r ApiSetStateRequest) Execute() (*http.Response, error) { - return r.ApiService.SetStateExecute(r) -} - -/* -SetState set a state - - @param ctx context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). 
- @param key state key - @return ApiSetStateRequest -*/ -func (a *StateAPIService) SetState(ctx context.Context, key string) ApiSetStateRequest { - return ApiSetStateRequest{ - ApiService: a, - ctx: ctx, - key: key, - } -} - -// Execute executes the request -func (a *StateAPIService) SetStateExecute(r ApiSetStateRequest) (*http.Response, error) { - var ( - localVarHTTPMethod = http.MethodPost - localVarPostBody interface{} - formFiles []formFile - ) - - localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "StateAPIService.SetState") - if err != nil { - return nil, &GenericOpenAPIError{error: err.Error()} - } - - localVarPath := localBasePath + "/api/v1/state/{key}" - localVarPath = strings.Replace(localVarPath, "{"+"key"+"}", url.PathEscape(parameterValueToString(r.key, "key")), -1) - - localVarHeaderParams := make(map[string]string) - localVarQueryParams := url.Values{} - localVarFormParams := url.Values{} - if r.body == nil { - return nil, reportError("body is required and must be specified") - } - - // to determine the Content-Type header - localVarHTTPContentTypes := []string{} - - // set Content-Type header - localVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes) - if localVarHTTPContentType != "" { - localVarHeaderParams["Content-Type"] = localVarHTTPContentType - } - - // to determine the Accept header - localVarHTTPHeaderAccepts := []string{} - - // set Accept header - localVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts) - if localVarHTTPHeaderAccept != "" { - localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept - } - // body params - localVarPostBody = r.body - req, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, formFiles) - if err != nil { - return nil, err - } - - localVarHTTPResponse, err := a.client.callAPI(req) - if err != nil || localVarHTTPResponse == nil { - return localVarHTTPResponse, 
err - } - - localVarBody, err := io.ReadAll(localVarHTTPResponse.Body) - localVarHTTPResponse.Body.Close() - localVarHTTPResponse.Body = io.NopCloser(bytes.NewBuffer(localVarBody)) - if err != nil { - return localVarHTTPResponse, err - } - - if localVarHTTPResponse.StatusCode >= 300 { - newErr := &GenericOpenAPIError{ - body: localVarBody, - error: localVarHTTPResponse.Status, - } - return localVarHTTPResponse, newErr - } - - return localVarHTTPResponse, nil -} diff --git a/admin/client/api_status.go b/admin/client/api_status.go deleted file mode 100644 index 36aa8692..00000000 --- a/admin/client/api_status.go +++ /dev/null @@ -1,108 +0,0 @@ -/* -Function Stream Service - -Manage Function Stream Resources - -API version: 1.0.0 -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package adminclient - -import ( - "bytes" - "context" - "io" - "net/http" - "net/url" -) - -// StatusAPIService StatusAPI service -type StatusAPIService service - -type ApiGetStatusRequest struct { - ctx context.Context - ApiService *StatusAPIService -} - -func (r ApiGetStatusRequest) Execute() (*http.Response, error) { - return r.ApiService.GetStatusExecute(r) -} - -/* -GetStatus Get the status of the Function Stream - - @param ctx context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). 
- @return ApiGetStatusRequest -*/ -func (a *StatusAPIService) GetStatus(ctx context.Context) ApiGetStatusRequest { - return ApiGetStatusRequest{ - ApiService: a, - ctx: ctx, - } -} - -// Execute executes the request -func (a *StatusAPIService) GetStatusExecute(r ApiGetStatusRequest) (*http.Response, error) { - var ( - localVarHTTPMethod = http.MethodGet - localVarPostBody interface{} - formFiles []formFile - ) - - localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "StatusAPIService.GetStatus") - if err != nil { - return nil, &GenericOpenAPIError{error: err.Error()} - } - - localVarPath := localBasePath + "/api/v1/status" - - localVarHeaderParams := make(map[string]string) - localVarQueryParams := url.Values{} - localVarFormParams := url.Values{} - - // to determine the Content-Type header - localVarHTTPContentTypes := []string{} - - // set Content-Type header - localVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes) - if localVarHTTPContentType != "" { - localVarHeaderParams["Content-Type"] = localVarHTTPContentType - } - - // to determine the Accept header - localVarHTTPHeaderAccepts := []string{} - - // set Accept header - localVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts) - if localVarHTTPHeaderAccept != "" { - localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept - } - req, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, formFiles) - if err != nil { - return nil, err - } - - localVarHTTPResponse, err := a.client.callAPI(req) - if err != nil || localVarHTTPResponse == nil { - return localVarHTTPResponse, err - } - - localVarBody, err := io.ReadAll(localVarHTTPResponse.Body) - localVarHTTPResponse.Body.Close() - localVarHTTPResponse.Body = io.NopCloser(bytes.NewBuffer(localVarBody)) - if err != nil { - return localVarHTTPResponse, err - } - - if localVarHTTPResponse.StatusCode >= 300 { - newErr := 
&GenericOpenAPIError{ - body: localVarBody, - error: localVarHTTPResponse.Status, - } - return localVarHTTPResponse, newErr - } - - return localVarHTTPResponse, nil -} diff --git a/admin/client/api_tube.go b/admin/client/api_tube.go deleted file mode 100644 index ab2c290a..00000000 --- a/admin/client/api_tube.go +++ /dev/null @@ -1,226 +0,0 @@ -/* -Function Stream Service - -Manage Function Stream Resources - -API version: 1.0.0 -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package adminclient - -import ( - "bytes" - "context" - "io" - "net/http" - "net/url" - "strings" -) - -// TubeAPIService TubeAPI service -type TubeAPIService service - -type ApiConsumeMessageRequest struct { - ctx context.Context - ApiService *TubeAPIService - name string -} - -func (r ApiConsumeMessageRequest) Execute() (string, *http.Response, error) { - return r.ApiService.ConsumeMessageExecute(r) -} - -/* -ConsumeMessage consume a message - - @param ctx context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). 
- @param name tube name - @return ApiConsumeMessageRequest -*/ -func (a *TubeAPIService) ConsumeMessage(ctx context.Context, name string) ApiConsumeMessageRequest { - return ApiConsumeMessageRequest{ - ApiService: a, - ctx: ctx, - name: name, - } -} - -// Execute executes the request -// -// @return string -func (a *TubeAPIService) ConsumeMessageExecute(r ApiConsumeMessageRequest) (string, *http.Response, error) { - var ( - localVarHTTPMethod = http.MethodGet - localVarPostBody interface{} - formFiles []formFile - localVarReturnValue string - ) - - localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "TubeAPIService.ConsumeMessage") - if err != nil { - return localVarReturnValue, nil, &GenericOpenAPIError{error: err.Error()} - } - - localVarPath := localBasePath + "/api/v1/consume/{name}" - localVarPath = strings.Replace(localVarPath, "{"+"name"+"}", url.PathEscape(parameterValueToString(r.name, "name")), -1) - - localVarHeaderParams := make(map[string]string) - localVarQueryParams := url.Values{} - localVarFormParams := url.Values{} - - // to determine the Content-Type header - localVarHTTPContentTypes := []string{} - - // set Content-Type header - localVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes) - if localVarHTTPContentType != "" { - localVarHeaderParams["Content-Type"] = localVarHTTPContentType - } - - // to determine the Accept header - localVarHTTPHeaderAccepts := []string{"application/json"} - - // set Accept header - localVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts) - if localVarHTTPHeaderAccept != "" { - localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept - } - req, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, formFiles) - if err != nil { - return localVarReturnValue, nil, err - } - - localVarHTTPResponse, err := a.client.callAPI(req) - if err != nil || localVarHTTPResponse == nil { - 
return localVarReturnValue, localVarHTTPResponse, err - } - - localVarBody, err := io.ReadAll(localVarHTTPResponse.Body) - localVarHTTPResponse.Body.Close() - localVarHTTPResponse.Body = io.NopCloser(bytes.NewBuffer(localVarBody)) - if err != nil { - return localVarReturnValue, localVarHTTPResponse, err - } - - if localVarHTTPResponse.StatusCode >= 300 { - newErr := &GenericOpenAPIError{ - body: localVarBody, - error: localVarHTTPResponse.Status, - } - return localVarReturnValue, localVarHTTPResponse, newErr - } - - err = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get("Content-Type")) - if err != nil { - newErr := &GenericOpenAPIError{ - body: localVarBody, - error: err.Error(), - } - return localVarReturnValue, localVarHTTPResponse, newErr - } - - return localVarReturnValue, localVarHTTPResponse, nil -} - -type ApiProduceMessageRequest struct { - ctx context.Context - ApiService *TubeAPIService - name string - body *string -} - -func (r ApiProduceMessageRequest) Body(body string) ApiProduceMessageRequest { - r.body = &body - return r -} - -func (r ApiProduceMessageRequest) Execute() (*http.Response, error) { - return r.ApiService.ProduceMessageExecute(r) -} - -/* -ProduceMessage produce a message - - @param ctx context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). 
- @param name tube name - @return ApiProduceMessageRequest -*/ -func (a *TubeAPIService) ProduceMessage(ctx context.Context, name string) ApiProduceMessageRequest { - return ApiProduceMessageRequest{ - ApiService: a, - ctx: ctx, - name: name, - } -} - -// Execute executes the request -func (a *TubeAPIService) ProduceMessageExecute(r ApiProduceMessageRequest) (*http.Response, error) { - var ( - localVarHTTPMethod = http.MethodPost - localVarPostBody interface{} - formFiles []formFile - ) - - localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "TubeAPIService.ProduceMessage") - if err != nil { - return nil, &GenericOpenAPIError{error: err.Error()} - } - - localVarPath := localBasePath + "/api/v1/produce/{name}" - localVarPath = strings.Replace(localVarPath, "{"+"name"+"}", url.PathEscape(parameterValueToString(r.name, "name")), -1) - - localVarHeaderParams := make(map[string]string) - localVarQueryParams := url.Values{} - localVarFormParams := url.Values{} - if r.body == nil { - return nil, reportError("body is required and must be specified") - } - - // to determine the Content-Type header - localVarHTTPContentTypes := []string{"application/json"} - - // set Content-Type header - localVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes) - if localVarHTTPContentType != "" { - localVarHeaderParams["Content-Type"] = localVarHTTPContentType - } - - // to determine the Accept header - localVarHTTPHeaderAccepts := []string{} - - // set Accept header - localVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts) - if localVarHTTPHeaderAccept != "" { - localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept - } - // body params - localVarPostBody = r.body - req, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, formFiles) - if err != nil { - return nil, err - } - - localVarHTTPResponse, err := a.client.callAPI(req) - if err != 
nil || localVarHTTPResponse == nil { - return localVarHTTPResponse, err - } - - localVarBody, err := io.ReadAll(localVarHTTPResponse.Body) - localVarHTTPResponse.Body.Close() - localVarHTTPResponse.Body = io.NopCloser(bytes.NewBuffer(localVarBody)) - if err != nil { - return localVarHTTPResponse, err - } - - if localVarHTTPResponse.StatusCode >= 300 { - newErr := &GenericOpenAPIError{ - body: localVarBody, - error: localVarHTTPResponse.Status, - } - return localVarHTTPResponse, newErr - } - - return localVarHTTPResponse, nil -} diff --git a/admin/client/client.go b/admin/client/client.go deleted file mode 100644 index 88da64e2..00000000 --- a/admin/client/client.go +++ /dev/null @@ -1,674 +0,0 @@ -/* -Function Stream Service - -Manage Function Stream Resources - -API version: 1.0.0 -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package adminclient - -import ( - "bytes" - "context" - "encoding/json" - "encoding/xml" - "errors" - "fmt" - "io" - "log" - "mime/multipart" - "net/http" - "net/http/httputil" - "net/url" - "os" - "path/filepath" - "reflect" - "regexp" - "strconv" - "strings" - "time" - "unicode/utf8" -) - -var ( - JsonCheck = regexp.MustCompile(`(?i:(?:application|text)/(?:[^;]+\+)?json)`) - XmlCheck = regexp.MustCompile(`(?i:(?:application|text)/(?:[^;]+\+)?xml)`) - queryParamSplit = regexp.MustCompile(`(^|&)([^&]+)`) - queryDescape = strings.NewReplacer("%5B", "[", "%5D", "]") -) - -// APIClient manages communication with the Function Stream Service API v1.0.0 -// In most cases there should be only one, shared, APIClient. -type APIClient struct { - cfg *Configuration - common service // Reuse a single struct instead of allocating one for each service on the heap. 
- - // API Services - - FunctionAPI *FunctionAPIService - - FunctionStoreAPI *FunctionStoreAPIService - - HttpTubeAPI *HttpTubeAPIService - - StateAPI *StateAPIService - - StatusAPI *StatusAPIService - - TubeAPI *TubeAPIService -} - -type service struct { - client *APIClient -} - -// NewAPIClient creates a new API client. Requires a userAgent string describing your application. -// optionally a custom http.Client to allow for advanced features such as caching. -func NewAPIClient(cfg *Configuration) *APIClient { - if cfg.HTTPClient == nil { - cfg.HTTPClient = http.DefaultClient - } - - c := &APIClient{} - c.cfg = cfg - c.common.client = c - - // API Services - c.FunctionAPI = (*FunctionAPIService)(&c.common) - c.FunctionStoreAPI = (*FunctionStoreAPIService)(&c.common) - c.HttpTubeAPI = (*HttpTubeAPIService)(&c.common) - c.StateAPI = (*StateAPIService)(&c.common) - c.StatusAPI = (*StatusAPIService)(&c.common) - c.TubeAPI = (*TubeAPIService)(&c.common) - - return c -} - -func atoi(in string) (int, error) { - return strconv.Atoi(in) -} - -// selectHeaderContentType select a content type from the available list. -func selectHeaderContentType(contentTypes []string) string { - if len(contentTypes) == 0 { - return "" - } - if contains(contentTypes, "application/json") { - return "application/json" - } - return contentTypes[0] // use the first content type specified in 'consumes' -} - -// selectHeaderAccept join all accept types and return -func selectHeaderAccept(accepts []string) string { - if len(accepts) == 0 { - return "" - } - - if contains(accepts, "application/json") { - return "application/json" - } - - return strings.Join(accepts, ",") -} - -// contains is a case insensitive match, finding needle in a haystack -func contains(haystack []string, needle string) bool { - for _, a := range haystack { - if strings.EqualFold(a, needle) { - return true - } - } - return false -} - -// Verify optional parameters are of the correct type. 
-func typeCheckParameter(obj interface{}, expected string, name string) error { - // Make sure there is an object. - if obj == nil { - return nil - } - - // Check the type is as expected. - if reflect.TypeOf(obj).String() != expected { - return fmt.Errorf("expected %s to be of type %s but received %s", name, expected, reflect.TypeOf(obj).String()) - } - return nil -} - -func parameterValueToString(obj interface{}, key string) string { - if reflect.TypeOf(obj).Kind() != reflect.Ptr { - return fmt.Sprintf("%v", obj) - } - var param, ok = obj.(MappedNullable) - if !ok { - return "" - } - dataMap, err := param.ToMap() - if err != nil { - return "" - } - return fmt.Sprintf("%v", dataMap[key]) -} - -// parameterAddToHeaderOrQuery adds the provided object to the request header or url query -// supporting deep object syntax -func parameterAddToHeaderOrQuery(headerOrQueryParams interface{}, keyPrefix string, obj interface{}, collectionType string) { - var v = reflect.ValueOf(obj) - var value = "" - if v == reflect.ValueOf(nil) { - value = "null" - } else { - switch v.Kind() { - case reflect.Invalid: - value = "invalid" - - case reflect.Struct: - if t, ok := obj.(MappedNullable); ok { - dataMap, err := t.ToMap() - if err != nil { - return - } - parameterAddToHeaderOrQuery(headerOrQueryParams, keyPrefix, dataMap, collectionType) - return - } - if t, ok := obj.(time.Time); ok { - parameterAddToHeaderOrQuery(headerOrQueryParams, keyPrefix, t.Format(time.RFC3339Nano), collectionType) - return - } - value = v.Type().String() + " value" - case reflect.Slice: - var indValue = reflect.ValueOf(obj) - if indValue == reflect.ValueOf(nil) { - return - } - var lenIndValue = indValue.Len() - for i := 0; i < lenIndValue; i++ { - var arrayValue = indValue.Index(i) - parameterAddToHeaderOrQuery(headerOrQueryParams, keyPrefix, arrayValue.Interface(), collectionType) - } - return - - case reflect.Map: - var indValue = reflect.ValueOf(obj) - if indValue == reflect.ValueOf(nil) { - return - } - 
iter := indValue.MapRange() - for iter.Next() { - k, v := iter.Key(), iter.Value() - parameterAddToHeaderOrQuery(headerOrQueryParams, fmt.Sprintf("%s[%s]", keyPrefix, k.String()), v.Interface(), collectionType) - } - return - - case reflect.Interface: - fallthrough - case reflect.Ptr: - parameterAddToHeaderOrQuery(headerOrQueryParams, keyPrefix, v.Elem().Interface(), collectionType) - return - - case reflect.Int, reflect.Int8, reflect.Int16, - reflect.Int32, reflect.Int64: - value = strconv.FormatInt(v.Int(), 10) - case reflect.Uint, reflect.Uint8, reflect.Uint16, - reflect.Uint32, reflect.Uint64, reflect.Uintptr: - value = strconv.FormatUint(v.Uint(), 10) - case reflect.Float32, reflect.Float64: - value = strconv.FormatFloat(v.Float(), 'g', -1, 32) - case reflect.Bool: - value = strconv.FormatBool(v.Bool()) - case reflect.String: - value = v.String() - default: - value = v.Type().String() + " value" - } - } - - switch valuesMap := headerOrQueryParams.(type) { - case url.Values: - if collectionType == "csv" && valuesMap.Get(keyPrefix) != "" { - valuesMap.Set(keyPrefix, valuesMap.Get(keyPrefix)+","+value) - } else { - valuesMap.Add(keyPrefix, value) - } - break - case map[string]string: - valuesMap[keyPrefix] = value - break - } -} - -// helper for converting interface{} parameters to json strings -func parameterToJson(obj interface{}) (string, error) { - jsonBuf, err := json.Marshal(obj) - if err != nil { - return "", err - } - return string(jsonBuf), err -} - -// callAPI do the request. 
-func (c *APIClient) callAPI(request *http.Request) (*http.Response, error) { - if c.cfg.Debug { - dump, err := httputil.DumpRequestOut(request, true) - if err != nil { - return nil, err - } - log.Printf("\n%s\n", string(dump)) - } - - resp, err := c.cfg.HTTPClient.Do(request) - if err != nil { - return resp, err - } - - if c.cfg.Debug { - dump, err := httputil.DumpResponse(resp, true) - if err != nil { - return resp, err - } - log.Printf("\n%s\n", string(dump)) - } - return resp, err -} - -// Allow modification of underlying config for alternate implementations and testing -// Caution: modifying the configuration while live can cause data races and potentially unwanted behavior -func (c *APIClient) GetConfig() *Configuration { - return c.cfg -} - -type formFile struct { - fileBytes []byte - fileName string - formFileName string -} - -// prepareRequest build the request -func (c *APIClient) prepareRequest( - ctx context.Context, - path string, method string, - postBody interface{}, - headerParams map[string]string, - queryParams url.Values, - formParams url.Values, - formFiles []formFile) (localVarRequest *http.Request, err error) { - - var body *bytes.Buffer - - // Detect postBody type and post. - if postBody != nil { - contentType := headerParams["Content-Type"] - if contentType == "" { - contentType = detectContentType(postBody) - headerParams["Content-Type"] = contentType - } - - body, err = setBody(postBody, contentType) - if err != nil { - return nil, err - } - } - - // add form parameters and file if available. 
- if strings.HasPrefix(headerParams["Content-Type"], "multipart/form-data") && len(formParams) > 0 || (len(formFiles) > 0) { - if body != nil { - return nil, errors.New("Cannot specify postBody and multipart form at the same time.") - } - body = &bytes.Buffer{} - w := multipart.NewWriter(body) - - for k, v := range formParams { - for _, iv := range v { - if strings.HasPrefix(k, "@") { // file - err = addFile(w, k[1:], iv) - if err != nil { - return nil, err - } - } else { // form value - w.WriteField(k, iv) - } - } - } - for _, formFile := range formFiles { - if len(formFile.fileBytes) > 0 && formFile.fileName != "" { - w.Boundary() - part, err := w.CreateFormFile(formFile.formFileName, filepath.Base(formFile.fileName)) - if err != nil { - return nil, err - } - _, err = part.Write(formFile.fileBytes) - if err != nil { - return nil, err - } - } - } - - // Set the Boundary in the Content-Type - headerParams["Content-Type"] = w.FormDataContentType() - - // Set Content-Length - headerParams["Content-Length"] = fmt.Sprintf("%d", body.Len()) - w.Close() - } - - if strings.HasPrefix(headerParams["Content-Type"], "application/x-www-form-urlencoded") && len(formParams) > 0 { - if body != nil { - return nil, errors.New("Cannot specify postBody and x-www-form-urlencoded form at the same time.") - } - body = &bytes.Buffer{} - body.WriteString(formParams.Encode()) - // Set Content-Length - headerParams["Content-Length"] = fmt.Sprintf("%d", body.Len()) - } - - // Setup path and query parameters - url, err := url.Parse(path) - if err != nil { - return nil, err - } - - // Override request host, if applicable - if c.cfg.Host != "" { - url.Host = c.cfg.Host - } - - // Override request scheme, if applicable - if c.cfg.Scheme != "" { - url.Scheme = c.cfg.Scheme - } - - // Adding Query Param - query := url.Query() - for k, v := range queryParams { - for _, iv := range v { - query.Add(k, iv) - } - } - - // Encode the parameters. 
- url.RawQuery = queryParamSplit.ReplaceAllStringFunc(query.Encode(), func(s string) string { - pieces := strings.Split(s, "=") - pieces[0] = queryDescape.Replace(pieces[0]) - return strings.Join(pieces, "=") - }) - - // Generate a new request - if body != nil { - localVarRequest, err = http.NewRequest(method, url.String(), body) - } else { - localVarRequest, err = http.NewRequest(method, url.String(), nil) - } - if err != nil { - return nil, err - } - - // add header parameters, if any - if len(headerParams) > 0 { - headers := http.Header{} - for h, v := range headerParams { - headers[h] = []string{v} - } - localVarRequest.Header = headers - } - - // Add the user agent to the request. - localVarRequest.Header.Add("User-Agent", c.cfg.UserAgent) - - if ctx != nil { - // add context to the request - localVarRequest = localVarRequest.WithContext(ctx) - - // Walk through any authentication. - - } - - for header, value := range c.cfg.DefaultHeader { - localVarRequest.Header.Add(header, value) - } - return localVarRequest, nil -} - -func (c *APIClient) decode(v interface{}, b []byte, contentType string) (err error) { - if len(b) == 0 { - return nil - } - if s, ok := v.(*string); ok { - *s = string(b) - return nil - } - if f, ok := v.(*os.File); ok { - f, err = os.CreateTemp("", "HttpClientFile") - if err != nil { - return - } - _, err = f.Write(b) - if err != nil { - return - } - _, err = f.Seek(0, io.SeekStart) - return - } - if f, ok := v.(**os.File); ok { - *f, err = os.CreateTemp("", "HttpClientFile") - if err != nil { - return - } - _, err = (*f).Write(b) - if err != nil { - return - } - _, err = (*f).Seek(0, io.SeekStart) - return - } - if XmlCheck.MatchString(contentType) { - if err = xml.Unmarshal(b, v); err != nil { - return err - } - return nil - } - if JsonCheck.MatchString(contentType) { - if actualObj, ok := v.(interface{ GetActualInstance() interface{} }); ok { // oneOf, anyOf schemas - if unmarshalObj, ok := actualObj.(interface{ UnmarshalJSON([]byte) 
error }); ok { // make sure it has UnmarshalJSON defined - if err = unmarshalObj.UnmarshalJSON(b); err != nil { - return err - } - } else { - return errors.New("Unknown type with GetActualInstance but no unmarshalObj.UnmarshalJSON defined") - } - } else if err = json.Unmarshal(b, v); err != nil { // simple model - return err - } - return nil - } - return errors.New("undefined response type") -} - -// Add a file to the multipart request -func addFile(w *multipart.Writer, fieldName, path string) error { - file, err := os.Open(filepath.Clean(path)) - if err != nil { - return err - } - err = file.Close() - if err != nil { - return err - } - - part, err := w.CreateFormFile(fieldName, filepath.Base(path)) - if err != nil { - return err - } - _, err = io.Copy(part, file) - - return err -} - -// Prevent trying to import "fmt" -func reportError(format string, a ...interface{}) error { - return fmt.Errorf(format, a...) -} - -// A wrapper for strict JSON decoding -func newStrictDecoder(data []byte) *json.Decoder { - dec := json.NewDecoder(bytes.NewBuffer(data)) - dec.DisallowUnknownFields() - return dec -} - -// Set request body from an interface{} -func setBody(body interface{}, contentType string) (bodyBuf *bytes.Buffer, err error) { - if bodyBuf == nil { - bodyBuf = &bytes.Buffer{} - } - - if reader, ok := body.(io.Reader); ok { - _, err = bodyBuf.ReadFrom(reader) - } else if fp, ok := body.(*os.File); ok { - _, err = bodyBuf.ReadFrom(fp) - } else if b, ok := body.([]byte); ok { - _, err = bodyBuf.Write(b) - } else if s, ok := body.(string); ok { - _, err = bodyBuf.WriteString(s) - } else if s, ok := body.(*string); ok { - _, err = bodyBuf.WriteString(*s) - } else if JsonCheck.MatchString(contentType) { - err = json.NewEncoder(bodyBuf).Encode(body) - } else if XmlCheck.MatchString(contentType) { - var bs []byte - bs, err = xml.Marshal(body) - if err == nil { - bodyBuf.Write(bs) - } - } - - if err != nil { - return nil, err - } - - if bodyBuf.Len() == 0 { - err = 
fmt.Errorf("invalid body type %s\n", contentType) - return nil, err - } - return bodyBuf, nil -} - -// detectContentType method is used to figure out `Request.Body` content type for request header -func detectContentType(body interface{}) string { - contentType := "text/plain; charset=utf-8" - kind := reflect.TypeOf(body).Kind() - - switch kind { - case reflect.Struct, reflect.Map, reflect.Ptr: - contentType = "application/json; charset=utf-8" - case reflect.String: - contentType = "text/plain; charset=utf-8" - default: - if b, ok := body.([]byte); ok { - contentType = http.DetectContentType(b) - } else if kind == reflect.Slice { - contentType = "application/json; charset=utf-8" - } - } - - return contentType -} - -// Ripped from https://github.com/gregjones/httpcache/blob/master/httpcache.go -type cacheControl map[string]string - -func parseCacheControl(headers http.Header) cacheControl { - cc := cacheControl{} - ccHeader := headers.Get("Cache-Control") - for _, part := range strings.Split(ccHeader, ",") { - part = strings.Trim(part, " ") - if part == "" { - continue - } - if strings.ContainsRune(part, '=') { - keyval := strings.Split(part, "=") - cc[strings.Trim(keyval[0], " ")] = strings.Trim(keyval[1], ",") - } else { - cc[part] = "" - } - } - return cc -} - -// CacheExpires helper function to determine remaining time before repeating a request. -func CacheExpires(r *http.Response) time.Time { - // Figure out when the cache expires. 
- var expires time.Time - now, err := time.Parse(time.RFC1123, r.Header.Get("date")) - if err != nil { - return time.Now() - } - respCacheControl := parseCacheControl(r.Header) - - if maxAge, ok := respCacheControl["max-age"]; ok { - lifetime, err := time.ParseDuration(maxAge + "s") - if err != nil { - expires = now - } else { - expires = now.Add(lifetime) - } - } else { - expiresHeader := r.Header.Get("Expires") - if expiresHeader != "" { - expires, err = time.Parse(time.RFC1123, expiresHeader) - if err != nil { - expires = now - } - } - } - return expires -} - -func strlen(s string) int { - return utf8.RuneCountInString(s) -} - -// GenericOpenAPIError Provides access to the body, error and model on returned errors. -type GenericOpenAPIError struct { - body []byte - error string - model interface{} -} - -// Error returns non-empty string if there was an error. -func (e GenericOpenAPIError) Error() string { - return e.error -} - -// Body returns the raw bytes of the response -func (e GenericOpenAPIError) Body() []byte { - return e.body -} - -// Model returns the unpacked model of the error -func (e GenericOpenAPIError) Model() interface{} { - return e.model -} - -// format error message using title and detail when model implements rfc7807 -func formatErrorMessage(status string, v interface{}) string { - str := "" - metaValue := reflect.ValueOf(v).Elem() - - if metaValue.Kind() == reflect.Struct { - field := metaValue.FieldByName("Title") - if field != (reflect.Value{}) { - str = fmt.Sprintf("%s", field.Interface()) - } - - field = metaValue.FieldByName("Detail") - if field != (reflect.Value{}) { - str = fmt.Sprintf("%s (%s)", str, field.Interface()) - } - } - - return strings.TrimSpace(fmt.Sprintf("%s %s", status, str)) -} diff --git a/admin/client/configuration.go b/admin/client/configuration.go deleted file mode 100644 index 1c0c86f0..00000000 --- a/admin/client/configuration.go +++ /dev/null @@ -1,214 +0,0 @@ -/* -Function Stream Service - -Manage Function 
Stream Resources - -API version: 1.0.0 -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package adminclient - -import ( - "context" - "fmt" - "net/http" - "strings" -) - -// contextKeys are used to identify the type of value in the context. -// Since these are string, it is possible to get a short description of the -// context key for logging and debugging using key.String(). - -type contextKey string - -func (c contextKey) String() string { - return "auth " + string(c) -} - -var ( - // ContextServerIndex uses a server configuration from the index. - ContextServerIndex = contextKey("serverIndex") - - // ContextOperationServerIndices uses a server configuration from the index mapping. - ContextOperationServerIndices = contextKey("serverOperationIndices") - - // ContextServerVariables overrides a server configuration variables. - ContextServerVariables = contextKey("serverVariables") - - // ContextOperationServerVariables overrides a server configuration variables using operation specific values. 
- ContextOperationServerVariables = contextKey("serverOperationVariables") -) - -// BasicAuth provides basic http authentication to a request passed via context using ContextBasicAuth -type BasicAuth struct { - UserName string `json:"userName,omitempty"` - Password string `json:"password,omitempty"` -} - -// APIKey provides API key based authentication to a request passed via context using ContextAPIKey -type APIKey struct { - Key string - Prefix string -} - -// ServerVariable stores the information about a server variable -type ServerVariable struct { - Description string - DefaultValue string - EnumValues []string -} - -// ServerConfiguration stores the information about a server -type ServerConfiguration struct { - URL string - Description string - Variables map[string]ServerVariable -} - -// ServerConfigurations stores multiple ServerConfiguration items -type ServerConfigurations []ServerConfiguration - -// Configuration stores the configuration of the API client -type Configuration struct { - Host string `json:"host,omitempty"` - Scheme string `json:"scheme,omitempty"` - DefaultHeader map[string]string `json:"defaultHeader,omitempty"` - UserAgent string `json:"userAgent,omitempty"` - Debug bool `json:"debug,omitempty"` - Servers ServerConfigurations - OperationServers map[string]ServerConfigurations - HTTPClient *http.Client -} - -// NewConfiguration returns a new Configuration object -func NewConfiguration() *Configuration { - cfg := &Configuration{ - DefaultHeader: make(map[string]string), - UserAgent: "OpenAPI-Generator/1.0.0/go", - Debug: false, - Servers: ServerConfigurations{ - { - URL: "http://localhost:7300", - Description: "No description provided", - }, - }, - OperationServers: map[string]ServerConfigurations{}, - } - return cfg -} - -// AddDefaultHeader adds a new HTTP header to the default header in the request -func (c *Configuration) AddDefaultHeader(key string, value string) { - c.DefaultHeader[key] = value -} - -// URL formats template on a 
index using given variables -func (sc ServerConfigurations) URL(index int, variables map[string]string) (string, error) { - if index < 0 || len(sc) <= index { - return "", fmt.Errorf("index %v out of range %v", index, len(sc)-1) - } - server := sc[index] - url := server.URL - - // go through variables and replace placeholders - for name, variable := range server.Variables { - if value, ok := variables[name]; ok { - found := bool(len(variable.EnumValues) == 0) - for _, enumValue := range variable.EnumValues { - if value == enumValue { - found = true - } - } - if !found { - return "", fmt.Errorf("the variable %s in the server URL has invalid value %v. Must be %v", name, value, variable.EnumValues) - } - url = strings.Replace(url, "{"+name+"}", value, -1) - } else { - url = strings.Replace(url, "{"+name+"}", variable.DefaultValue, -1) - } - } - return url, nil -} - -// ServerURL returns URL based on server settings -func (c *Configuration) ServerURL(index int, variables map[string]string) (string, error) { - return c.Servers.URL(index, variables) -} - -func getServerIndex(ctx context.Context) (int, error) { - si := ctx.Value(ContextServerIndex) - if si != nil { - if index, ok := si.(int); ok { - return index, nil - } - return 0, reportError("Invalid type %T should be int", si) - } - return 0, nil -} - -func getServerOperationIndex(ctx context.Context, endpoint string) (int, error) { - osi := ctx.Value(ContextOperationServerIndices) - if osi != nil { - if operationIndices, ok := osi.(map[string]int); !ok { - return 0, reportError("Invalid type %T should be map[string]int", osi) - } else { - index, ok := operationIndices[endpoint] - if ok { - return index, nil - } - } - } - return getServerIndex(ctx) -} - -func getServerVariables(ctx context.Context) (map[string]string, error) { - sv := ctx.Value(ContextServerVariables) - if sv != nil { - if variables, ok := sv.(map[string]string); ok { - return variables, nil - } - return nil, reportError("ctx value of 
ContextServerVariables has invalid type %T should be map[string]string", sv) - } - return nil, nil -} - -func getServerOperationVariables(ctx context.Context, endpoint string) (map[string]string, error) { - osv := ctx.Value(ContextOperationServerVariables) - if osv != nil { - if operationVariables, ok := osv.(map[string]map[string]string); !ok { - return nil, reportError("ctx value of ContextOperationServerVariables has invalid type %T should be map[string]map[string]string", osv) - } else { - variables, ok := operationVariables[endpoint] - if ok { - return variables, nil - } - } - } - return getServerVariables(ctx) -} - -// ServerURLWithContext returns a new server URL given an endpoint -func (c *Configuration) ServerURLWithContext(ctx context.Context, endpoint string) (string, error) { - sc, ok := c.OperationServers[endpoint] - if !ok { - sc = c.Servers - } - - if ctx == nil { - return sc.URL(0, nil) - } - - index, err := getServerOperationIndex(ctx, endpoint) - if err != nil { - return "", err - } - - variables, err := getServerOperationVariables(ctx, endpoint) - if err != nil { - return "", err - } - - return sc.URL(index, variables) -} diff --git a/admin/client/docs/ModelFunction.md b/admin/client/docs/ModelFunction.md deleted file mode 100644 index 46a729c3..00000000 --- a/admin/client/docs/ModelFunction.md +++ /dev/null @@ -1,208 +0,0 @@ -# ModelFunction - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**Config** | Pointer to **map[string]string** | | [optional] -**Name** | **string** | | -**Namespace** | Pointer to **string** | | [optional] -**Package** | **string** | | -**Replicas** | **int32** | | -**Runtime** | [**ModelRuntimeConfig**](ModelRuntimeConfig.md) | | -**Sink** | [**ModelTubeConfig**](ModelTubeConfig.md) | | -**Source** | [**[]ModelTubeConfig**](ModelTubeConfig.md) | | - -## Methods - -### NewModelFunction - -`func NewModelFunction(name string, package_ string, replicas int32, 
runtime ModelRuntimeConfig, sink ModelTubeConfig, source []ModelTubeConfig, ) *ModelFunction` - -NewModelFunction instantiates a new ModelFunction object -This constructor will assign default values to properties that have it defined, -and makes sure properties required by API are set, but the set of arguments -will change when the set of required properties is changed - -### NewModelFunctionWithDefaults - -`func NewModelFunctionWithDefaults() *ModelFunction` - -NewModelFunctionWithDefaults instantiates a new ModelFunction object -This constructor will only assign default values to properties that have it defined, -but it doesn't guarantee that properties required by API are set - -### GetConfig - -`func (o *ModelFunction) GetConfig() map[string]string` - -GetConfig returns the Config field if non-nil, zero value otherwise. - -### GetConfigOk - -`func (o *ModelFunction) GetConfigOk() (*map[string]string, bool)` - -GetConfigOk returns a tuple with the Config field if it's non-nil, zero value otherwise -and a boolean to check if the value has been set. - -### SetConfig - -`func (o *ModelFunction) SetConfig(v map[string]string)` - -SetConfig sets Config field to given value. - -### HasConfig - -`func (o *ModelFunction) HasConfig() bool` - -HasConfig returns a boolean if a field has been set. - -### GetName - -`func (o *ModelFunction) GetName() string` - -GetName returns the Name field if non-nil, zero value otherwise. - -### GetNameOk - -`func (o *ModelFunction) GetNameOk() (*string, bool)` - -GetNameOk returns a tuple with the Name field if it's non-nil, zero value otherwise -and a boolean to check if the value has been set. - -### SetName - -`func (o *ModelFunction) SetName(v string)` - -SetName sets Name field to given value. - - -### GetNamespace - -`func (o *ModelFunction) GetNamespace() string` - -GetNamespace returns the Namespace field if non-nil, zero value otherwise. 
- -### GetNamespaceOk - -`func (o *ModelFunction) GetNamespaceOk() (*string, bool)` - -GetNamespaceOk returns a tuple with the Namespace field if it's non-nil, zero value otherwise -and a boolean to check if the value has been set. - -### SetNamespace - -`func (o *ModelFunction) SetNamespace(v string)` - -SetNamespace sets Namespace field to given value. - -### HasNamespace - -`func (o *ModelFunction) HasNamespace() bool` - -HasNamespace returns a boolean if a field has been set. - -### GetPackage - -`func (o *ModelFunction) GetPackage() string` - -GetPackage returns the Package field if non-nil, zero value otherwise. - -### GetPackageOk - -`func (o *ModelFunction) GetPackageOk() (*string, bool)` - -GetPackageOk returns a tuple with the Package field if it's non-nil, zero value otherwise -and a boolean to check if the value has been set. - -### SetPackage - -`func (o *ModelFunction) SetPackage(v string)` - -SetPackage sets Package field to given value. - - -### GetReplicas - -`func (o *ModelFunction) GetReplicas() int32` - -GetReplicas returns the Replicas field if non-nil, zero value otherwise. - -### GetReplicasOk - -`func (o *ModelFunction) GetReplicasOk() (*int32, bool)` - -GetReplicasOk returns a tuple with the Replicas field if it's non-nil, zero value otherwise -and a boolean to check if the value has been set. - -### SetReplicas - -`func (o *ModelFunction) SetReplicas(v int32)` - -SetReplicas sets Replicas field to given value. - - -### GetRuntime - -`func (o *ModelFunction) GetRuntime() ModelRuntimeConfig` - -GetRuntime returns the Runtime field if non-nil, zero value otherwise. - -### GetRuntimeOk - -`func (o *ModelFunction) GetRuntimeOk() (*ModelRuntimeConfig, bool)` - -GetRuntimeOk returns a tuple with the Runtime field if it's non-nil, zero value otherwise -and a boolean to check if the value has been set. - -### SetRuntime - -`func (o *ModelFunction) SetRuntime(v ModelRuntimeConfig)` - -SetRuntime sets Runtime field to given value. 
- - -### GetSink - -`func (o *ModelFunction) GetSink() ModelTubeConfig` - -GetSink returns the Sink field if non-nil, zero value otherwise. - -### GetSinkOk - -`func (o *ModelFunction) GetSinkOk() (*ModelTubeConfig, bool)` - -GetSinkOk returns a tuple with the Sink field if it's non-nil, zero value otherwise -and a boolean to check if the value has been set. - -### SetSink - -`func (o *ModelFunction) SetSink(v ModelTubeConfig)` - -SetSink sets Sink field to given value. - - -### GetSource - -`func (o *ModelFunction) GetSource() []ModelTubeConfig` - -GetSource returns the Source field if non-nil, zero value otherwise. - -### GetSourceOk - -`func (o *ModelFunction) GetSourceOk() (*[]ModelTubeConfig, bool)` - -GetSourceOk returns a tuple with the Source field if it's non-nil, zero value otherwise -and a boolean to check if the value has been set. - -### SetSource - -`func (o *ModelFunction) SetSource(v []ModelTubeConfig)` - -SetSource sets Source field to given value. - - - -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) - - diff --git a/admin/client/docs/ModelRuntimeConfig.md b/admin/client/docs/ModelRuntimeConfig.md deleted file mode 100644 index 5c40572d..00000000 --- a/admin/client/docs/ModelRuntimeConfig.md +++ /dev/null @@ -1,77 +0,0 @@ -# ModelRuntimeConfig - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**Config** | Pointer to **map[string]interface{}** | | [optional] -**Type** | **string** | | - -## Methods - -### NewModelRuntimeConfig - -`func NewModelRuntimeConfig(type_ string, ) *ModelRuntimeConfig` - -NewModelRuntimeConfig instantiates a new ModelRuntimeConfig object -This constructor will assign default values to properties that have it defined, -and makes sure properties required by API are set, but the set of arguments -will change when the set of required properties is 
changed - -### NewModelRuntimeConfigWithDefaults - -`func NewModelRuntimeConfigWithDefaults() *ModelRuntimeConfig` - -NewModelRuntimeConfigWithDefaults instantiates a new ModelRuntimeConfig object -This constructor will only assign default values to properties that have it defined, -but it doesn't guarantee that properties required by API are set - -### GetConfig - -`func (o *ModelRuntimeConfig) GetConfig() map[string]interface{}` - -GetConfig returns the Config field if non-nil, zero value otherwise. - -### GetConfigOk - -`func (o *ModelRuntimeConfig) GetConfigOk() (*map[string]interface{}, bool)` - -GetConfigOk returns a tuple with the Config field if it's non-nil, zero value otherwise -and a boolean to check if the value has been set. - -### SetConfig - -`func (o *ModelRuntimeConfig) SetConfig(v map[string]interface{})` - -SetConfig sets Config field to given value. - -### HasConfig - -`func (o *ModelRuntimeConfig) HasConfig() bool` - -HasConfig returns a boolean if a field has been set. - -### GetType - -`func (o *ModelRuntimeConfig) GetType() string` - -GetType returns the Type field if non-nil, zero value otherwise. - -### GetTypeOk - -`func (o *ModelRuntimeConfig) GetTypeOk() (*string, bool)` - -GetTypeOk returns a tuple with the Type field if it's non-nil, zero value otherwise -and a boolean to check if the value has been set. - -### SetType - -`func (o *ModelRuntimeConfig) SetType(v string)` - -SetType sets Type field to given value. 
- - - -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) - - diff --git a/admin/client/docs/ModelTubeConfig.md b/admin/client/docs/ModelTubeConfig.md deleted file mode 100644 index 45b8a00b..00000000 --- a/admin/client/docs/ModelTubeConfig.md +++ /dev/null @@ -1,77 +0,0 @@ -# ModelTubeConfig - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**Config** | Pointer to **map[string]interface{}** | | [optional] -**Type** | **string** | | - -## Methods - -### NewModelTubeConfig - -`func NewModelTubeConfig(type_ string, ) *ModelTubeConfig` - -NewModelTubeConfig instantiates a new ModelTubeConfig object -This constructor will assign default values to properties that have it defined, -and makes sure properties required by API are set, but the set of arguments -will change when the set of required properties is changed - -### NewModelTubeConfigWithDefaults - -`func NewModelTubeConfigWithDefaults() *ModelTubeConfig` - -NewModelTubeConfigWithDefaults instantiates a new ModelTubeConfig object -This constructor will only assign default values to properties that have it defined, -but it doesn't guarantee that properties required by API are set - -### GetConfig - -`func (o *ModelTubeConfig) GetConfig() map[string]interface{}` - -GetConfig returns the Config field if non-nil, zero value otherwise. - -### GetConfigOk - -`func (o *ModelTubeConfig) GetConfigOk() (*map[string]interface{}, bool)` - -GetConfigOk returns a tuple with the Config field if it's non-nil, zero value otherwise -and a boolean to check if the value has been set. - -### SetConfig - -`func (o *ModelTubeConfig) SetConfig(v map[string]interface{})` - -SetConfig sets Config field to given value. - -### HasConfig - -`func (o *ModelTubeConfig) HasConfig() bool` - -HasConfig returns a boolean if a field has been set. 
- -### GetType - -`func (o *ModelTubeConfig) GetType() string` - -GetType returns the Type field if non-nil, zero value otherwise. - -### GetTypeOk - -`func (o *ModelTubeConfig) GetTypeOk() (*string, bool)` - -GetTypeOk returns a tuple with the Type field if it's non-nil, zero value otherwise -and a boolean to check if the value has been set. - -### SetType - -`func (o *ModelTubeConfig) SetType(v string)` - -SetType sets Type field to given value. - - - -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) - - diff --git a/admin/client/docs/RestfulspecSchemaType.md b/admin/client/docs/RestfulspecSchemaType.md deleted file mode 100644 index d102355f..00000000 --- a/admin/client/docs/RestfulspecSchemaType.md +++ /dev/null @@ -1,72 +0,0 @@ -# RestfulspecSchemaType - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**Format** | **string** | | -**RawType** | **string** | | - -## Methods - -### NewRestfulspecSchemaType - -`func NewRestfulspecSchemaType(format string, rawType string, ) *RestfulspecSchemaType` - -NewRestfulspecSchemaType instantiates a new RestfulspecSchemaType object -This constructor will assign default values to properties that have it defined, -and makes sure properties required by API are set, but the set of arguments -will change when the set of required properties is changed - -### NewRestfulspecSchemaTypeWithDefaults - -`func NewRestfulspecSchemaTypeWithDefaults() *RestfulspecSchemaType` - -NewRestfulspecSchemaTypeWithDefaults instantiates a new RestfulspecSchemaType object -This constructor will only assign default values to properties that have it defined, -but it doesn't guarantee that properties required by API are set - -### GetFormat - -`func (o *RestfulspecSchemaType) GetFormat() string` - -GetFormat returns the Format field if non-nil, zero value otherwise. 
- -### GetFormatOk - -`func (o *RestfulspecSchemaType) GetFormatOk() (*string, bool)` - -GetFormatOk returns a tuple with the Format field if it's non-nil, zero value otherwise -and a boolean to check if the value has been set. - -### SetFormat - -`func (o *RestfulspecSchemaType) SetFormat(v string)` - -SetFormat sets Format field to given value. - - -### GetRawType - -`func (o *RestfulspecSchemaType) GetRawType() string` - -GetRawType returns the RawType field if non-nil, zero value otherwise. - -### GetRawTypeOk - -`func (o *RestfulspecSchemaType) GetRawTypeOk() (*string, bool)` - -GetRawTypeOk returns a tuple with the RawType field if it's non-nil, zero value otherwise -and a boolean to check if the value has been set. - -### SetRawType - -`func (o *RestfulspecSchemaType) SetRawType(v string)` - -SetRawType sets RawType field to given value. - - - -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) - - diff --git a/admin/client/model_model_function.go b/admin/client/model_model_function.go deleted file mode 100644 index 85922ee5..00000000 --- a/admin/client/model_model_function.go +++ /dev/null @@ -1,368 +0,0 @@ -/* -Function Stream Service - -Manage Function Stream Resources - -API version: 1.0.0 -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. 
- -package adminclient - -import ( - "bytes" - "encoding/json" - "fmt" -) - -// checks if the ModelFunction type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &ModelFunction{} - -// ModelFunction struct for ModelFunction -type ModelFunction struct { - Config *map[string]string `json:"config,omitempty"` - Name string `json:"name"` - Namespace *string `json:"namespace,omitempty"` - Package string `json:"package"` - Replicas int32 `json:"replicas"` - Runtime ModelRuntimeConfig `json:"runtime"` - Sink ModelTubeConfig `json:"sink"` - Source []ModelTubeConfig `json:"source"` -} - -type _ModelFunction ModelFunction - -// NewModelFunction instantiates a new ModelFunction object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewModelFunction(name string, package_ string, replicas int32, runtime ModelRuntimeConfig, sink ModelTubeConfig, source []ModelTubeConfig) *ModelFunction { - this := ModelFunction{} - this.Name = name - this.Package = package_ - this.Replicas = replicas - this.Runtime = runtime - this.Sink = sink - this.Source = source - return &this -} - -// NewModelFunctionWithDefaults instantiates a new ModelFunction object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewModelFunctionWithDefaults() *ModelFunction { - this := ModelFunction{} - return &this -} - -// GetConfig returns the Config field value if set, zero value otherwise. -func (o *ModelFunction) GetConfig() map[string]string { - if o == nil || IsNil(o.Config) { - var ret map[string]string - return ret - } - return *o.Config -} - -// GetConfigOk returns a tuple with the Config field value if set, nil otherwise -// and a boolean to check if the value has been set. 
-func (o *ModelFunction) GetConfigOk() (*map[string]string, bool) { - if o == nil || IsNil(o.Config) { - return nil, false - } - return o.Config, true -} - -// HasConfig returns a boolean if a field has been set. -func (o *ModelFunction) HasConfig() bool { - if o != nil && !IsNil(o.Config) { - return true - } - - return false -} - -// SetConfig gets a reference to the given map[string]string and assigns it to the Config field. -func (o *ModelFunction) SetConfig(v map[string]string) { - o.Config = &v -} - -// GetName returns the Name field value -func (o *ModelFunction) GetName() string { - if o == nil { - var ret string - return ret - } - - return o.Name -} - -// GetNameOk returns a tuple with the Name field value -// and a boolean to check if the value has been set. -func (o *ModelFunction) GetNameOk() (*string, bool) { - if o == nil { - return nil, false - } - return &o.Name, true -} - -// SetName sets field value -func (o *ModelFunction) SetName(v string) { - o.Name = v -} - -// GetNamespace returns the Namespace field value if set, zero value otherwise. -func (o *ModelFunction) GetNamespace() string { - if o == nil || IsNil(o.Namespace) { - var ret string - return ret - } - return *o.Namespace -} - -// GetNamespaceOk returns a tuple with the Namespace field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *ModelFunction) GetNamespaceOk() (*string, bool) { - if o == nil || IsNil(o.Namespace) { - return nil, false - } - return o.Namespace, true -} - -// HasNamespace returns a boolean if a field has been set. -func (o *ModelFunction) HasNamespace() bool { - if o != nil && !IsNil(o.Namespace) { - return true - } - - return false -} - -// SetNamespace gets a reference to the given string and assigns it to the Namespace field. 
-func (o *ModelFunction) SetNamespace(v string) { - o.Namespace = &v -} - -// GetPackage returns the Package field value -func (o *ModelFunction) GetPackage() string { - if o == nil { - var ret string - return ret - } - - return o.Package -} - -// GetPackageOk returns a tuple with the Package field value -// and a boolean to check if the value has been set. -func (o *ModelFunction) GetPackageOk() (*string, bool) { - if o == nil { - return nil, false - } - return &o.Package, true -} - -// SetPackage sets field value -func (o *ModelFunction) SetPackage(v string) { - o.Package = v -} - -// GetReplicas returns the Replicas field value -func (o *ModelFunction) GetReplicas() int32 { - if o == nil { - var ret int32 - return ret - } - - return o.Replicas -} - -// GetReplicasOk returns a tuple with the Replicas field value -// and a boolean to check if the value has been set. -func (o *ModelFunction) GetReplicasOk() (*int32, bool) { - if o == nil { - return nil, false - } - return &o.Replicas, true -} - -// SetReplicas sets field value -func (o *ModelFunction) SetReplicas(v int32) { - o.Replicas = v -} - -// GetRuntime returns the Runtime field value -func (o *ModelFunction) GetRuntime() ModelRuntimeConfig { - if o == nil { - var ret ModelRuntimeConfig - return ret - } - - return o.Runtime -} - -// GetRuntimeOk returns a tuple with the Runtime field value -// and a boolean to check if the value has been set. -func (o *ModelFunction) GetRuntimeOk() (*ModelRuntimeConfig, bool) { - if o == nil { - return nil, false - } - return &o.Runtime, true -} - -// SetRuntime sets field value -func (o *ModelFunction) SetRuntime(v ModelRuntimeConfig) { - o.Runtime = v -} - -// GetSink returns the Sink field value -func (o *ModelFunction) GetSink() ModelTubeConfig { - if o == nil { - var ret ModelTubeConfig - return ret - } - - return o.Sink -} - -// GetSinkOk returns a tuple with the Sink field value -// and a boolean to check if the value has been set. 
-func (o *ModelFunction) GetSinkOk() (*ModelTubeConfig, bool) { - if o == nil { - return nil, false - } - return &o.Sink, true -} - -// SetSink sets field value -func (o *ModelFunction) SetSink(v ModelTubeConfig) { - o.Sink = v -} - -// GetSource returns the Source field value -func (o *ModelFunction) GetSource() []ModelTubeConfig { - if o == nil { - var ret []ModelTubeConfig - return ret - } - - return o.Source -} - -// GetSourceOk returns a tuple with the Source field value -// and a boolean to check if the value has been set. -func (o *ModelFunction) GetSourceOk() ([]ModelTubeConfig, bool) { - if o == nil { - return nil, false - } - return o.Source, true -} - -// SetSource sets field value -func (o *ModelFunction) SetSource(v []ModelTubeConfig) { - o.Source = v -} - -func (o ModelFunction) MarshalJSON() ([]byte, error) { - toSerialize, err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o ModelFunction) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - if !IsNil(o.Config) { - toSerialize["config"] = o.Config - } - toSerialize["name"] = o.Name - if !IsNil(o.Namespace) { - toSerialize["namespace"] = o.Namespace - } - toSerialize["package"] = o.Package - toSerialize["replicas"] = o.Replicas - toSerialize["runtime"] = o.Runtime - toSerialize["sink"] = o.Sink - toSerialize["source"] = o.Source - return toSerialize, nil -} - -func (o *ModelFunction) UnmarshalJSON(data []byte) (err error) { - // This validates that all required properties are included in the JSON object - // by unmarshalling the object into a generic map with string keys and checking - // that every required field exists as a key in the generic map. 
- requiredProperties := []string{ - "name", - "package", - "replicas", - "runtime", - "sink", - "source", - } - - allProperties := make(map[string]interface{}) - - err = json.Unmarshal(data, &allProperties) - - if err != nil { - return err - } - - for _, requiredProperty := range requiredProperties { - if _, exists := allProperties[requiredProperty]; !exists { - return fmt.Errorf("no value given for required property %v", requiredProperty) - } - } - - varModelFunction := _ModelFunction{} - - decoder := json.NewDecoder(bytes.NewReader(data)) - decoder.DisallowUnknownFields() - err = decoder.Decode(&varModelFunction) - - if err != nil { - return err - } - - *o = ModelFunction(varModelFunction) - - return err -} - -type NullableModelFunction struct { - value *ModelFunction - isSet bool -} - -func (v NullableModelFunction) Get() *ModelFunction { - return v.value -} - -func (v *NullableModelFunction) Set(val *ModelFunction) { - v.value = val - v.isSet = true -} - -func (v NullableModelFunction) IsSet() bool { - return v.isSet -} - -func (v *NullableModelFunction) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableModelFunction(val *ModelFunction) *NullableModelFunction { - return &NullableModelFunction{value: val, isSet: true} -} - -func (v NullableModelFunction) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableModelFunction) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} diff --git a/admin/client/model_model_runtime_config.go b/admin/client/model_model_runtime_config.go deleted file mode 100644 index 4298c0da..00000000 --- a/admin/client/model_model_runtime_config.go +++ /dev/null @@ -1,192 +0,0 @@ -/* -Function Stream Service - -Manage Function Stream Resources - -API version: 1.0.0 -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. 
- -package adminclient - -import ( - "bytes" - "encoding/json" - "fmt" -) - -// checks if the ModelRuntimeConfig type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &ModelRuntimeConfig{} - -// ModelRuntimeConfig struct for ModelRuntimeConfig -type ModelRuntimeConfig struct { - Config map[string]interface{} `json:"config,omitempty"` - Type string `json:"type"` -} - -type _ModelRuntimeConfig ModelRuntimeConfig - -// NewModelRuntimeConfig instantiates a new ModelRuntimeConfig object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewModelRuntimeConfig(type_ string) *ModelRuntimeConfig { - this := ModelRuntimeConfig{} - this.Type = type_ - return &this -} - -// NewModelRuntimeConfigWithDefaults instantiates a new ModelRuntimeConfig object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewModelRuntimeConfigWithDefaults() *ModelRuntimeConfig { - this := ModelRuntimeConfig{} - return &this -} - -// GetConfig returns the Config field value if set, zero value otherwise. -func (o *ModelRuntimeConfig) GetConfig() map[string]interface{} { - if o == nil || IsNil(o.Config) { - var ret map[string]interface{} - return ret - } - return o.Config -} - -// GetConfigOk returns a tuple with the Config field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *ModelRuntimeConfig) GetConfigOk() (map[string]interface{}, bool) { - if o == nil || IsNil(o.Config) { - return map[string]interface{}{}, false - } - return o.Config, true -} - -// HasConfig returns a boolean if a field has been set. 
-func (o *ModelRuntimeConfig) HasConfig() bool { - if o != nil && !IsNil(o.Config) { - return true - } - - return false -} - -// SetConfig gets a reference to the given map[string]interface{} and assigns it to the Config field. -func (o *ModelRuntimeConfig) SetConfig(v map[string]interface{}) { - o.Config = v -} - -// GetType returns the Type field value -func (o *ModelRuntimeConfig) GetType() string { - if o == nil { - var ret string - return ret - } - - return o.Type -} - -// GetTypeOk returns a tuple with the Type field value -// and a boolean to check if the value has been set. -func (o *ModelRuntimeConfig) GetTypeOk() (*string, bool) { - if o == nil { - return nil, false - } - return &o.Type, true -} - -// SetType sets field value -func (o *ModelRuntimeConfig) SetType(v string) { - o.Type = v -} - -func (o ModelRuntimeConfig) MarshalJSON() ([]byte, error) { - toSerialize, err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o ModelRuntimeConfig) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - if !IsNil(o.Config) { - toSerialize["config"] = o.Config - } - toSerialize["type"] = o.Type - return toSerialize, nil -} - -func (o *ModelRuntimeConfig) UnmarshalJSON(data []byte) (err error) { - // This validates that all required properties are included in the JSON object - // by unmarshalling the object into a generic map with string keys and checking - // that every required field exists as a key in the generic map. 
- requiredProperties := []string{ - "type", - } - - allProperties := make(map[string]interface{}) - - err = json.Unmarshal(data, &allProperties) - - if err != nil { - return err - } - - for _, requiredProperty := range requiredProperties { - if _, exists := allProperties[requiredProperty]; !exists { - return fmt.Errorf("no value given for required property %v", requiredProperty) - } - } - - varModelRuntimeConfig := _ModelRuntimeConfig{} - - decoder := json.NewDecoder(bytes.NewReader(data)) - decoder.DisallowUnknownFields() - err = decoder.Decode(&varModelRuntimeConfig) - - if err != nil { - return err - } - - *o = ModelRuntimeConfig(varModelRuntimeConfig) - - return err -} - -type NullableModelRuntimeConfig struct { - value *ModelRuntimeConfig - isSet bool -} - -func (v NullableModelRuntimeConfig) Get() *ModelRuntimeConfig { - return v.value -} - -func (v *NullableModelRuntimeConfig) Set(val *ModelRuntimeConfig) { - v.value = val - v.isSet = true -} - -func (v NullableModelRuntimeConfig) IsSet() bool { - return v.isSet -} - -func (v *NullableModelRuntimeConfig) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableModelRuntimeConfig(val *ModelRuntimeConfig) *NullableModelRuntimeConfig { - return &NullableModelRuntimeConfig{value: val, isSet: true} -} - -func (v NullableModelRuntimeConfig) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableModelRuntimeConfig) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} diff --git a/admin/client/model_model_tube_config.go b/admin/client/model_model_tube_config.go deleted file mode 100644 index 62fe85be..00000000 --- a/admin/client/model_model_tube_config.go +++ /dev/null @@ -1,192 +0,0 @@ -/* -Function Stream Service - -Manage Function Stream Resources - -API version: 1.0.0 -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. 
- -package adminclient - -import ( - "bytes" - "encoding/json" - "fmt" -) - -// checks if the ModelTubeConfig type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &ModelTubeConfig{} - -// ModelTubeConfig struct for ModelTubeConfig -type ModelTubeConfig struct { - Config map[string]interface{} `json:"config,omitempty"` - Type string `json:"type"` -} - -type _ModelTubeConfig ModelTubeConfig - -// NewModelTubeConfig instantiates a new ModelTubeConfig object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewModelTubeConfig(type_ string) *ModelTubeConfig { - this := ModelTubeConfig{} - this.Type = type_ - return &this -} - -// NewModelTubeConfigWithDefaults instantiates a new ModelTubeConfig object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewModelTubeConfigWithDefaults() *ModelTubeConfig { - this := ModelTubeConfig{} - return &this -} - -// GetConfig returns the Config field value if set, zero value otherwise. -func (o *ModelTubeConfig) GetConfig() map[string]interface{} { - if o == nil || IsNil(o.Config) { - var ret map[string]interface{} - return ret - } - return o.Config -} - -// GetConfigOk returns a tuple with the Config field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *ModelTubeConfig) GetConfigOk() (map[string]interface{}, bool) { - if o == nil || IsNil(o.Config) { - return map[string]interface{}{}, false - } - return o.Config, true -} - -// HasConfig returns a boolean if a field has been set. 
-func (o *ModelTubeConfig) HasConfig() bool { - if o != nil && !IsNil(o.Config) { - return true - } - - return false -} - -// SetConfig gets a reference to the given map[string]interface{} and assigns it to the Config field. -func (o *ModelTubeConfig) SetConfig(v map[string]interface{}) { - o.Config = v -} - -// GetType returns the Type field value -func (o *ModelTubeConfig) GetType() string { - if o == nil { - var ret string - return ret - } - - return o.Type -} - -// GetTypeOk returns a tuple with the Type field value -// and a boolean to check if the value has been set. -func (o *ModelTubeConfig) GetTypeOk() (*string, bool) { - if o == nil { - return nil, false - } - return &o.Type, true -} - -// SetType sets field value -func (o *ModelTubeConfig) SetType(v string) { - o.Type = v -} - -func (o ModelTubeConfig) MarshalJSON() ([]byte, error) { - toSerialize, err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o ModelTubeConfig) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - if !IsNil(o.Config) { - toSerialize["config"] = o.Config - } - toSerialize["type"] = o.Type - return toSerialize, nil -} - -func (o *ModelTubeConfig) UnmarshalJSON(data []byte) (err error) { - // This validates that all required properties are included in the JSON object - // by unmarshalling the object into a generic map with string keys and checking - // that every required field exists as a key in the generic map. 
- requiredProperties := []string{ - "type", - } - - allProperties := make(map[string]interface{}) - - err = json.Unmarshal(data, &allProperties) - - if err != nil { - return err - } - - for _, requiredProperty := range requiredProperties { - if _, exists := allProperties[requiredProperty]; !exists { - return fmt.Errorf("no value given for required property %v", requiredProperty) - } - } - - varModelTubeConfig := _ModelTubeConfig{} - - decoder := json.NewDecoder(bytes.NewReader(data)) - decoder.DisallowUnknownFields() - err = decoder.Decode(&varModelTubeConfig) - - if err != nil { - return err - } - - *o = ModelTubeConfig(varModelTubeConfig) - - return err -} - -type NullableModelTubeConfig struct { - value *ModelTubeConfig - isSet bool -} - -func (v NullableModelTubeConfig) Get() *ModelTubeConfig { - return v.value -} - -func (v *NullableModelTubeConfig) Set(val *ModelTubeConfig) { - v.value = val - v.isSet = true -} - -func (v NullableModelTubeConfig) IsSet() bool { - return v.isSet -} - -func (v *NullableModelTubeConfig) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableModelTubeConfig(val *ModelTubeConfig) *NullableModelTubeConfig { - return &NullableModelTubeConfig{value: val, isSet: true} -} - -func (v NullableModelTubeConfig) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableModelTubeConfig) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} diff --git a/admin/client/model_restfulspec_schema_type.go b/admin/client/model_restfulspec_schema_type.go deleted file mode 100644 index 9ec66446..00000000 --- a/admin/client/model_restfulspec_schema_type.go +++ /dev/null @@ -1,184 +0,0 @@ -/* -Function Stream Service - -Manage Function Stream Resources - -API version: 1.0.0 -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. 
- -package adminclient - -import ( - "bytes" - "encoding/json" - "fmt" -) - -// checks if the RestfulspecSchemaType type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &RestfulspecSchemaType{} - -// RestfulspecSchemaType struct for RestfulspecSchemaType -type RestfulspecSchemaType struct { - Format string `json:"Format"` - RawType string `json:"RawType"` -} - -type _RestfulspecSchemaType RestfulspecSchemaType - -// NewRestfulspecSchemaType instantiates a new RestfulspecSchemaType object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewRestfulspecSchemaType(format string, rawType string) *RestfulspecSchemaType { - this := RestfulspecSchemaType{} - this.Format = format - this.RawType = rawType - return &this -} - -// NewRestfulspecSchemaTypeWithDefaults instantiates a new RestfulspecSchemaType object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewRestfulspecSchemaTypeWithDefaults() *RestfulspecSchemaType { - this := RestfulspecSchemaType{} - return &this -} - -// GetFormat returns the Format field value -func (o *RestfulspecSchemaType) GetFormat() string { - if o == nil { - var ret string - return ret - } - - return o.Format -} - -// GetFormatOk returns a tuple with the Format field value -// and a boolean to check if the value has been set. 
-func (o *RestfulspecSchemaType) GetFormatOk() (*string, bool) { - if o == nil { - return nil, false - } - return &o.Format, true -} - -// SetFormat sets field value -func (o *RestfulspecSchemaType) SetFormat(v string) { - o.Format = v -} - -// GetRawType returns the RawType field value -func (o *RestfulspecSchemaType) GetRawType() string { - if o == nil { - var ret string - return ret - } - - return o.RawType -} - -// GetRawTypeOk returns a tuple with the RawType field value -// and a boolean to check if the value has been set. -func (o *RestfulspecSchemaType) GetRawTypeOk() (*string, bool) { - if o == nil { - return nil, false - } - return &o.RawType, true -} - -// SetRawType sets field value -func (o *RestfulspecSchemaType) SetRawType(v string) { - o.RawType = v -} - -func (o RestfulspecSchemaType) MarshalJSON() ([]byte, error) { - toSerialize, err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o RestfulspecSchemaType) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - toSerialize["Format"] = o.Format - toSerialize["RawType"] = o.RawType - return toSerialize, nil -} - -func (o *RestfulspecSchemaType) UnmarshalJSON(data []byte) (err error) { - // This validates that all required properties are included in the JSON object - // by unmarshalling the object into a generic map with string keys and checking - // that every required field exists as a key in the generic map. 
- requiredProperties := []string{ - "Format", - "RawType", - } - - allProperties := make(map[string]interface{}) - - err = json.Unmarshal(data, &allProperties) - - if err != nil { - return err - } - - for _, requiredProperty := range requiredProperties { - if _, exists := allProperties[requiredProperty]; !exists { - return fmt.Errorf("no value given for required property %v", requiredProperty) - } - } - - varRestfulspecSchemaType := _RestfulspecSchemaType{} - - decoder := json.NewDecoder(bytes.NewReader(data)) - decoder.DisallowUnknownFields() - err = decoder.Decode(&varRestfulspecSchemaType) - - if err != nil { - return err - } - - *o = RestfulspecSchemaType(varRestfulspecSchemaType) - - return err -} - -type NullableRestfulspecSchemaType struct { - value *RestfulspecSchemaType - isSet bool -} - -func (v NullableRestfulspecSchemaType) Get() *RestfulspecSchemaType { - return v.value -} - -func (v *NullableRestfulspecSchemaType) Set(val *RestfulspecSchemaType) { - v.value = val - v.isSet = true -} - -func (v NullableRestfulspecSchemaType) IsSet() bool { - return v.isSet -} - -func (v *NullableRestfulspecSchemaType) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableRestfulspecSchemaType(val *RestfulspecSchemaType) *NullableRestfulspecSchemaType { - return &NullableRestfulspecSchemaType{value: val, isSet: true} -} - -func (v NullableRestfulspecSchemaType) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableRestfulspecSchemaType) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} diff --git a/admin/client/response.go b/admin/client/response.go deleted file mode 100644 index 2e36f66d..00000000 --- a/admin/client/response.go +++ /dev/null @@ -1,47 +0,0 @@ -/* -Function Stream Service - -Manage Function Stream Resources - -API version: 1.0.0 -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. 
- -package adminclient - -import ( - "net/http" -) - -// APIResponse stores the API response returned by the server. -type APIResponse struct { - *http.Response `json:"-"` - Message string `json:"message,omitempty"` - // Operation is the name of the OpenAPI operation. - Operation string `json:"operation,omitempty"` - // RequestURL is the request URL. This value is always available, even if the - // embedded *http.Response is nil. - RequestURL string `json:"url,omitempty"` - // Method is the HTTP method used for the request. This value is always - // available, even if the embedded *http.Response is nil. - Method string `json:"method,omitempty"` - // Payload holds the contents of the response body (which may be nil or empty). - // This is provided here as the raw response.Body() reader will have already - // been drained. - Payload []byte `json:"-"` -} - -// NewAPIResponse returns a new APIResponse object. -func NewAPIResponse(r *http.Response) *APIResponse { - - response := &APIResponse{Response: r} - return response -} - -// NewAPIResponseWithError returns a new APIResponse object with the provided error message. -func NewAPIResponseWithError(errorMessage string) *APIResponse { - - response := &APIResponse{Message: errorMessage} - return response -} diff --git a/admin/client/utils.go b/admin/client/utils.go deleted file mode 100644 index be8dcda5..00000000 --- a/admin/client/utils.go +++ /dev/null @@ -1,347 +0,0 @@ -/* -Function Stream Service - -Manage Function Stream Resources - -API version: 1.0.0 -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package adminclient - -import ( - "encoding/json" - "reflect" - "time" -) - -// PtrBool is a helper routine that returns a pointer to given boolean value. -func PtrBool(v bool) *bool { return &v } - -// PtrInt is a helper routine that returns a pointer to given integer value. 
-func PtrInt(v int) *int { return &v } - -// PtrInt32 is a helper routine that returns a pointer to given integer value. -func PtrInt32(v int32) *int32 { return &v } - -// PtrInt64 is a helper routine that returns a pointer to given integer value. -func PtrInt64(v int64) *int64 { return &v } - -// PtrFloat32 is a helper routine that returns a pointer to given float value. -func PtrFloat32(v float32) *float32 { return &v } - -// PtrFloat64 is a helper routine that returns a pointer to given float value. -func PtrFloat64(v float64) *float64 { return &v } - -// PtrString is a helper routine that returns a pointer to given string value. -func PtrString(v string) *string { return &v } - -// PtrTime is helper routine that returns a pointer to given Time value. -func PtrTime(v time.Time) *time.Time { return &v } - -type NullableBool struct { - value *bool - isSet bool -} - -func (v NullableBool) Get() *bool { - return v.value -} - -func (v *NullableBool) Set(val *bool) { - v.value = val - v.isSet = true -} - -func (v NullableBool) IsSet() bool { - return v.isSet -} - -func (v *NullableBool) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableBool(val *bool) *NullableBool { - return &NullableBool{value: val, isSet: true} -} - -func (v NullableBool) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableBool) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - -type NullableInt struct { - value *int - isSet bool -} - -func (v NullableInt) Get() *int { - return v.value -} - -func (v *NullableInt) Set(val *int) { - v.value = val - v.isSet = true -} - -func (v NullableInt) IsSet() bool { - return v.isSet -} - -func (v *NullableInt) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableInt(val *int) *NullableInt { - return &NullableInt{value: val, isSet: true} -} - -func (v NullableInt) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableInt) 
UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - -type NullableInt32 struct { - value *int32 - isSet bool -} - -func (v NullableInt32) Get() *int32 { - return v.value -} - -func (v *NullableInt32) Set(val *int32) { - v.value = val - v.isSet = true -} - -func (v NullableInt32) IsSet() bool { - return v.isSet -} - -func (v *NullableInt32) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableInt32(val *int32) *NullableInt32 { - return &NullableInt32{value: val, isSet: true} -} - -func (v NullableInt32) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableInt32) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - -type NullableInt64 struct { - value *int64 - isSet bool -} - -func (v NullableInt64) Get() *int64 { - return v.value -} - -func (v *NullableInt64) Set(val *int64) { - v.value = val - v.isSet = true -} - -func (v NullableInt64) IsSet() bool { - return v.isSet -} - -func (v *NullableInt64) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableInt64(val *int64) *NullableInt64 { - return &NullableInt64{value: val, isSet: true} -} - -func (v NullableInt64) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableInt64) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - -type NullableFloat32 struct { - value *float32 - isSet bool -} - -func (v NullableFloat32) Get() *float32 { - return v.value -} - -func (v *NullableFloat32) Set(val *float32) { - v.value = val - v.isSet = true -} - -func (v NullableFloat32) IsSet() bool { - return v.isSet -} - -func (v *NullableFloat32) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableFloat32(val *float32) *NullableFloat32 { - return &NullableFloat32{value: val, isSet: true} -} - -func (v NullableFloat32) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableFloat32) 
UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - -type NullableFloat64 struct { - value *float64 - isSet bool -} - -func (v NullableFloat64) Get() *float64 { - return v.value -} - -func (v *NullableFloat64) Set(val *float64) { - v.value = val - v.isSet = true -} - -func (v NullableFloat64) IsSet() bool { - return v.isSet -} - -func (v *NullableFloat64) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableFloat64(val *float64) *NullableFloat64 { - return &NullableFloat64{value: val, isSet: true} -} - -func (v NullableFloat64) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableFloat64) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - -type NullableString struct { - value *string - isSet bool -} - -func (v NullableString) Get() *string { - return v.value -} - -func (v *NullableString) Set(val *string) { - v.value = val - v.isSet = true -} - -func (v NullableString) IsSet() bool { - return v.isSet -} - -func (v *NullableString) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableString(val *string) *NullableString { - return &NullableString{value: val, isSet: true} -} - -func (v NullableString) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableString) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - -type NullableTime struct { - value *time.Time - isSet bool -} - -func (v NullableTime) Get() *time.Time { - return v.value -} - -func (v *NullableTime) Set(val *time.Time) { - v.value = val - v.isSet = true -} - -func (v NullableTime) IsSet() bool { - return v.isSet -} - -func (v *NullableTime) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableTime(val *time.Time) *NullableTime { - return &NullableTime{value: val, isSet: true} -} - -func (v NullableTime) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - 
-func (v *NullableTime) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - -// IsNil checks if an input is nil -func IsNil(i interface{}) bool { - if i == nil { - return true - } - switch reflect.TypeOf(i).Kind() { - case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.UnsafePointer, reflect.Interface, reflect.Slice: - return reflect.ValueOf(i).IsNil() - case reflect.Array: - return reflect.ValueOf(i).IsZero() - } - return false -} - -type MappedNullable interface { - ToMap() (map[string]interface{}, error) -} diff --git a/admin/utils/utils.go b/admin/utils/utils.go deleted file mode 100644 index fbd5e604..00000000 --- a/admin/utils/utils.go +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package utils - -import ( - "fmt" - - adminclient "github.com/functionstream/function-stream/admin/client" -) - -const ( - PulsarQueue string = "pulsar" - MemoryQueue string = "memory" -) - -func MakeMemorySourceTubeConfig(topics ...string) []adminclient.ModelTubeConfig { - return MakeQueueSourceTubeConfig(MemoryQueue, "fs", topics...) -} - -func MakePulsarSourceTubeConfig(topics ...string) []adminclient.ModelTubeConfig { - return MakeQueueSourceTubeConfig(PulsarQueue, "fs", topics...) 
-} - -func MakeQueueSourceTubeConfig(queueType string, subName string, topics ...string) []adminclient.ModelTubeConfig { - return []adminclient.ModelTubeConfig{ - { - Type: queueType, - Config: map[string]interface{}{ - "inputs": append([]string{}, topics...), - "subscription-name": subName, - }, - }, - } -} - -func MakeMemorySinkTubeConfig(topic string) *adminclient.ModelTubeConfig { - return MakeQueueSinkTubeConfig(MemoryQueue, topic) -} - -func MakePulsarSinkTubeConfig(topic string) *adminclient.ModelTubeConfig { - return MakeQueueSinkTubeConfig(PulsarQueue, topic) -} - -func MakeQueueSinkTubeConfig(queueType string, topic string) *adminclient.ModelTubeConfig { - return &adminclient.ModelTubeConfig{ - Type: queueType, - Config: map[string]interface{}{ - "output": topic, - }, - } -} - -func GetInputTopics(f *adminclient.ModelFunction) ([]string, error) { - if len(f.Source) < 1 { - return nil, fmt.Errorf("function %s has no sources", f.Name) - } - config := f.Source[0].Config - if len(config) < 1 { - return nil, fmt.Errorf("source config for function %s is empty", f.Name) - } - if topicList, ok := config["inputs"].([]string); ok { - return topicList, nil - } - return nil, fmt.Errorf("source config for function %s has no input topics", f.Name) -} - -func GetOutputTopic(f *adminclient.ModelFunction) (string, error) { - config := f.Sink.Config - if len(config) < 1 { - return "", fmt.Errorf("sink config for function %s is empty", f.Name) - } - if topic, ok := config["output"].(string); ok { - return topic, nil - } - return "", fmt.Errorf("sink config for function %s has no output topic", f.Name) -} diff --git a/apidocs.json b/apidocs.json deleted file mode 100644 index 50c6fb4c..00000000 --- a/apidocs.json +++ /dev/null @@ -1,442 +0,0 @@ -{ - "schemes": [ - "http" - ], - "swagger": "2.0", - "info": { - "description": "Manage Function Stream Resources", - "title": "Function Stream Service", - "contact": { - "name": "Function Stream Org", - "url": 
"https://github.com/FunctionStream" - }, - "license": { - "name": "Apache 2", - "url": "http://www.apache.org/licenses/" - }, - "version": "1.0.0" - }, - "host": "localhost:7300", - "paths": { - "/api/v1/consume/{name}": { - "get": { - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "tube" - ], - "summary": "consume a message", - "operationId": "consumeMessage", - "parameters": [ - { - "type": "string", - "description": "tube name", - "name": "name", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "type": "string", - "format": "byte" - } - } - } - } - }, - "/api/v1/function": { - "get": { - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "function" - ], - "summary": "get all functions", - "operationId": "getAllFunctions", - "responses": { - "200": { - "description": "OK", - "schema": { - "type": "array", - "items": { - "type": "string" - } - } - } - } - }, - "post": { - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "function" - ], - "summary": "create a function", - "operationId": "createFunction", - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/model.Function" - } - } - ], - "responses": { - "200": { - "description": "OK" - } - } - } - }, - "/api/v1/function-store/reload": { - "get": { - "tags": [ - "function-store" - ], - "summary": "reload functions from the function store", - "operationId": "reloadFunctions", - "responses": { - "200": { - "description": "OK" - } - } - } - }, - "/api/v1/function/{namespace}/{name}": { - "delete": { - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "function" - ], - "summary": "delete a namespaced function", - "operationId": "deleteNamespacedFunction", - "parameters": [ - { - "type": "string", - "description": 
"name of the function", - "name": "name", - "in": "path", - "required": true - }, - { - "type": "string", - "description": "namespace of the function", - "name": "namespace", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "OK" - } - } - } - }, - "/api/v1/function/{name}": { - "delete": { - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "function" - ], - "summary": "delete a function", - "operationId": "deleteFunction", - "parameters": [ - { - "type": "string", - "description": "name of the function", - "name": "name", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "OK" - } - } - } - }, - "/api/v1/http-tube/{endpoint}": { - "post": { - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "http-tube" - ], - "summary": "trigger the http tube endpoint", - "operationId": "triggerHttpTubeEndpoint", - "parameters": [ - { - "type": "string", - "description": "Endpoint", - "name": "endpoint", - "in": "path", - "required": true - }, - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "type": "string", - "format": "byte" - } - } - ], - "responses": { - "200": { - "description": "OK" - } - } - } - }, - "/api/v1/produce/{name}": { - "post": { - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "tube" - ], - "summary": "produce a message", - "operationId": "produceMessage", - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "type": "string", - "format": "byte" - } - }, - { - "type": "string", - "description": "tube name", - "name": "name", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "OK" - } - } - } - }, - "/api/v1/state/{key}": { - "get": { - "tags": [ - "state" - ], - "summary": "get a state", - "operationId": "getState", - "parameters": [ - { - "type": 
"string", - "description": "state key", - "name": "key", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "type": "string", - "format": "byte" - } - } - } - }, - "post": { - "tags": [ - "state" - ], - "summary": "set a state", - "operationId": "setState", - "parameters": [ - { - "type": "string", - "description": "state key", - "name": "key", - "in": "path", - "required": true - }, - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "type": "string", - "format": "byte" - } - } - ], - "responses": { - "200": { - "description": "OK" - } - } - } - }, - "/api/v1/status": { - "get": { - "tags": [ - "status" - ], - "summary": "Get the status of the Function Stream", - "operationId": "getStatus", - "responses": { - "200": { - "description": "OK" - } - } - } - } - }, - "definitions": { - "model.Function": { - "required": [ - "name", - "package", - "runtime", - "source", - "sink", - "replicas" - ], - "properties": { - "config": { - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "name": { - "type": "string" - }, - "namespace": { - "type": "string" - }, - "package": { - "type": "string" - }, - "replicas": { - "type": "integer", - "format": "int32" - }, - "runtime": { - "$ref": "#/definitions/model.RuntimeConfig" - }, - "sink": { - "$ref": "#/definitions/model.TubeConfig" - }, - "source": { - "type": "array", - "items": { - "$ref": "#/definitions/model.TubeConfig" - } - } - } - }, - "model.RuntimeConfig": { - "required": [ - "type" - ], - "properties": { - "config": { - "type": "object" - }, - "type": { - "type": "string" - } - } - }, - "model.TubeConfig": { - "required": [ - "type" - ], - "properties": { - "config": { - "type": "object" - }, - "type": { - "type": "string" - } - } - }, - "restfulspec.SchemaType": { - "required": [ - "RawType", - "Format" - ], - "properties": { - "Format": { - "type": "string" - }, - "RawType": { - "type": "string" - } - } - } - }, - 
"tags": [ - { - "description": "Managing functions", - "name": "function" - }, - { - "description": "Managing tubes", - "name": "tube" - }, - { - "description": "Managing state", - "name": "state" - }, - { - "description": "Managing HTTP tubes", - "name": "http-tube" - }, - { - "description": "Managing function store", - "name": "function-store" - } - ] -} \ No newline at end of file diff --git a/benchmark/bench_test.go b/benchmark/bench_test.go deleted file mode 100644 index ed4fd491..00000000 --- a/benchmark/bench_test.go +++ /dev/null @@ -1,185 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package benchmark - -import ( - "context" - "math/rand" - "os" - "runtime/pprof" - "strconv" - "testing" - "time" - - "github.com/functionstream/function-stream/common/config" - - "github.com/functionstream/function-stream/fs/api" - "github.com/functionstream/function-stream/fs/runtime/wazero" - - "github.com/apache/pulsar-client-go/pulsaradmin" - "github.com/apache/pulsar-client-go/pulsaradmin/pkg/utils" - adminclient "github.com/functionstream/function-stream/admin/client" - adminutils "github.com/functionstream/function-stream/admin/utils" - "github.com/functionstream/function-stream/common" - "github.com/functionstream/function-stream/fs/contube" - "github.com/functionstream/function-stream/perf" - "github.com/functionstream/function-stream/server" -) - -func BenchmarkStressForBasicFunc(b *testing.B) { - s, err := server.NewDefaultServer() - if err != nil { - b.Fatal(err) - } - svrCtx, svrCancel := context.WithCancel(context.Background()) - go s.Run(svrCtx) - defer func() { - svrCancel() - }() - - inputTopic := "test-input-" + strconv.Itoa(rand.Int()) - outputTopic := "test-output-" + strconv.Itoa(rand.Int()) - cfg := &pulsaradmin.Config{} - admin, err := pulsaradmin.NewClient(cfg) - if err != nil { - panic(err) - } - replicas := int32(5) - createTopic := func(t string) { - tn, err := utils.GetTopicName(t) - if err != nil { - panic(err) - } - err = admin.Topics().Create(*tn, int(replicas)) - if err != nil { - panic(err) - } - - } - createTopic(inputTopic) - createTopic(outputTopic) - - pConfig := &perf.Config{ - PulsarURL: "pulsar://localhost:6650", - RequestRate: 200000.0, - Func: &adminclient.ModelFunction{ - Runtime: adminclient.ModelRuntimeConfig{ - Type: common.WASMRuntime, - Config: map[string]interface{}{ - common.RuntimeArchiveConfigKey: "../bin/example_basic.wasm", - }, - }, - Source: adminutils.MakePulsarSourceTubeConfig(inputTopic), - Sink: *adminutils.MakePulsarSinkTubeConfig(outputTopic), - Replicas: replicas, - }, - } - - b.ReportAllocs() 
- - ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(30*time.Second)) - defer cancel() - - profile := b.Name() + ".pprof" - file, err := os.Create(profile) - if err != nil { - b.Fatal(err) - } - defer func() { - _ = file.Close() - }() - - err = pprof.StartCPUProfile(file) - if err != nil { - b.Fatal(err) - } - - <-s.WaitForReady(context.Background()) - perf.New(pConfig).Run(ctx) - - pprof.StopCPUProfile() -} - -func BenchmarkStressForBasicFuncWithMemoryQueue(b *testing.B) { - memoryQueueFactory := contube.NewMemoryQueueFactory(context.Background()) - - s, err := server.NewServer( - server.WithRuntimeFactoryBuilder(common.WASMRuntime, - func(configMap config.ConfigMap) (api.FunctionRuntimeFactory, error) { - return wazero.NewWazeroFunctionRuntimeFactory(), nil - }), - server.WithTubeFactoryBuilder(common.MemoryTubeType, - func(configMap config.ConfigMap) (contube.TubeFactory, error) { - return memoryQueueFactory, nil - }), - ) - if err != nil { - b.Fatal(err) - } - svrCtx, svrCancel := context.WithCancel(context.Background()) - go s.Run(svrCtx) - defer func() { - svrCancel() - }() - - inputTopic := "test-input-" + strconv.Itoa(rand.Int()) - outputTopic := "test-output-" + strconv.Itoa(rand.Int()) - - replicas := int32(5) - - pConfig := &perf.Config{ - RequestRate: 200000.0, - Func: &adminclient.ModelFunction{ - Runtime: adminclient.ModelRuntimeConfig{ - Type: common.WASMRuntime, - Config: map[string]interface{}{ - common.RuntimeArchiveConfigKey: "../bin/example_basic.wasm", - }, - }, - Source: adminutils.MakeMemorySourceTubeConfig(inputTopic), - Sink: *adminutils.MakeMemorySinkTubeConfig(outputTopic), - Replicas: replicas, - }, - QueueBuilder: func(ctx context.Context) (contube.TubeFactory, error) { - return memoryQueueFactory, nil - }, - } - - b.ReportAllocs() - - ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(30*time.Second)) - defer cancel() - - profile := b.Name() + ".pprof" - file, err := os.Create(profile) - if 
err != nil { - b.Fatal(err) - } - defer func() { - _ = file.Close() - }() - - err = pprof.StartCPUProfile(file) - if err != nil { - b.Fatal(err) - } - - <-s.WaitForReady(context.Background()) - perf.New(pConfig).Run(ctx) - - pprof.StopCPUProfile() -} diff --git a/cli/cli/Cargo.toml b/cli/cli/Cargo.toml new file mode 100644 index 00000000..5879dbb8 --- /dev/null +++ b/cli/cli/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "function-stream-cli" +version = "0.1.0" +edition = "2021" + +[[bin]] +name = "cli" +path = "src/main.rs" + +[dependencies] +arrow-array = "52" +arrow-ipc = "52" +arrow-schema = "52" +comfy-table = "7" +function-stream = { path = "../../" } +protocol = { path = "../../protocol" } +clap = { version = "4.5", features = ["derive"] } +thiserror = "2" +tokio = { version = "1.0", features = ["full"] } +tonic = { version = "0.12", features = ["default"] } +rustyline = { version = "14.0", features = ["with-dirs"] } +rustyline-derive = "0.8" + diff --git a/cli/cli/src/main.rs b/cli/cli/src/main.rs new file mode 100644 index 00000000..fdb0bdc7 --- /dev/null +++ b/cli/cli/src/main.rs @@ -0,0 +1,40 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +mod repl; + +use clap::Parser; +use repl::Repl; +use std::process; + +#[derive(Parser, Debug)] +#[command(name = "function-stream-cli")] +#[command(about = "Interactive SQL CLI for Function Stream", long_about = None)] +struct Args { + #[arg(short = 'i', long = "ip", default_value = "127.0.0.1")] + ip: String, + + #[arg(short = 'P', long, default_value = "8080")] + port: u16, +} + +#[tokio::main] +async fn main() { + let args = Args::parse(); + + let mut repl = Repl::new(args.ip.clone(), args.port); + + if let Err(e) = repl.run_async().await { + eprintln!("Error: {}", e); + process::exit(1); + } +} diff --git a/cli/cli/src/repl.rs b/cli/cli/src/repl.rs new file mode 100644 index 00000000..49f3cf8b --- /dev/null +++ b/cli/cli/src/repl.rs @@ -0,0 +1,353 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use arrow_array::{ + Array, BooleanArray, Float32Array, Float64Array, Int32Array, Int64Array, StringArray, +}; +use arrow_ipc::reader::StreamReader; +use arrow_schema::DataType; +use comfy_table::presets::UTF8_FULL; +use comfy_table::{Attribute, Cell, Color, ContentArrangement, Table, TableComponent}; +use protocol::cli::{function_stream_service_client::FunctionStreamServiceClient, SqlRequest}; +use rustyline::error::ReadlineError; +use rustyline::{Config, DefaultEditor, EditMode}; +use std::io::{self, Cursor, Write}; +use tonic::Request; + +#[derive(Debug, thiserror::Error)] +pub enum ReplError { + #[error("RPC error: {0}")] + Rpc(#[from] tonic::Status), + #[error("Connection failed: {0}")] + Connection(String), + #[error("Internal error: {0}")] + Internal(String), + #[error("IO error: {0}")] + Io(#[from] io::Error), +} + +pub struct Repl { + client: Option>, + server_host: String, + server_port: u16, + editor: Option, +} + +impl Repl { + pub fn new(server_host: String, server_port: u16) -> Self { + let config = Config::builder() + .history_ignore_space(true) + .edit_mode(EditMode::Emacs) + .build(); + + let editor = DefaultEditor::with_config(config).ok().map(|mut ed| { + let _ = ed.load_history(".function-stream-cli-history"); + ed + }); + + Self { + client: None, + server_host, + server_port, + editor, + } + } + + fn server_address(&self) -> String { + format!("http://{}:{}", self.server_host, self.server_port) + } + + pub async fn connect(&mut self) -> Result<(), ReplError> { + let addr = self.server_address(); + let client = FunctionStreamServiceClient::connect(addr.clone()) + .await + .map_err(|e| ReplError::Connection(format!("Connect failed to {}: {}", addr, e)))?; + self.client = Some(client); + Ok(()) + } + + pub async fn execute_sql(&mut self, sql: &str) -> Result<(), ReplError> { + let client = self + .client + .as_mut() + .ok_or_else(|| ReplError::Connection("Client not connected".to_string()))?; + + let response = client + 
.execute_sql(Request::new(SqlRequest { + sql: sql.to_string(), + })) + .await? + .into_inner(); + + // 1. Handle non-success status codes immediately + if response.status_code != 200 { + eprintln!("Error ({}): {}", response.status_code, response.message); + return Ok(()); + } + + // 2. Print the operational message (e.g., "CREATE FUNCTION successful") + let clean_msg = response.message.trim(); + if !clean_msg.is_empty() { + if clean_msg.ends_with("found") { + println!(" ✓ {}", clean_msg); + } else { + println!("{}", clean_msg); + } + } + + // 3. Strict Data Check: Only proceed if data is explicitly present and non-empty + if let Some(bytes) = response.data { + if !bytes.is_empty() { + // format_arrow_data returns Ok(Some(Table)) ONLY if row_count > 0 + match self.format_arrow_data(&bytes) { + Ok(Some(table)) => println!("{}", table), + Ok(None) => { + // Data was present but contained 0 rows (e.g., empty result set) + // We print nothing here to keep output clean as requested + } + Err(e) => eprintln!("Failed to parse result data: {}", e), + } + } + } + + Ok(()) + } + + fn format_arrow_data(&self, bytes: &[u8]) -> Result, ReplError> { + let cursor = Cursor::new(bytes); + let reader = match StreamReader::try_new(cursor, None) { + Ok(r) => r, + Err(_) => return Ok(None), + }; + + let mut table = Table::new(); + table + .load_preset(UTF8_FULL) + .set_content_arrangement(ContentArrangement::Dynamic) + // 表头下方用单线,不用双线 + .set_style(TableComponent::LeftHeaderIntersection, '├') + .set_style(TableComponent::HeaderLines, '─') + .set_style(TableComponent::MiddleHeaderIntersections, '┼') + .set_style(TableComponent::RightHeaderIntersection, '┤'); + + let mut has_rows = false; + let mut header_initialized = false; + + for batch_result in reader { + let batch = batch_result.map_err(|e| ReplError::Internal(e.to_string()))?; + if batch.num_rows() == 0 { + continue; + } + has_rows = true; + + if !header_initialized { + let headers: Vec = batch + .schema() + .fields() + .iter() + 
.map(|f| { + Cell::new(f.name()) + .fg(Color::Cyan) + .add_attribute(Attribute::Bold) + }) + .collect(); + table.set_header(headers); + header_initialized = true; + } + + let field_names: Vec = batch + .schema() + .fields() + .iter() + .map(|f| f.name().clone()) + .collect(); + + for row_idx in 0..batch.num_rows() { + let mut row_cells: Vec = Vec::new(); + for col_idx in 0..batch.num_columns() { + let cell_val = self.extract_value(batch.column(col_idx), row_idx); + let cell = if field_names.get(col_idx).map(|s| s.as_str()) == Some("status") { + let c = Cell::new(cell_val.as_str()); + match cell_val.as_str() { + "Running" => c.fg(Color::Green), + "Stopped" | "Failed" => c.fg(Color::Red), + _ => c, + } + } else { + Cell::new(cell_val) + }; + row_cells.push(cell); + } + table.add_row(row_cells); + } + } + + if has_rows { + Ok(Some(table)) + } else { + Ok(None) + } + } + + fn extract_value(&self, column: &dyn Array, row: usize) -> String { + if column.is_null(row) { + return "NULL".to_string(); + } + + match column.data_type() { + DataType::Utf8 | DataType::LargeUtf8 => column + .as_any() + .downcast_ref::() + .unwrap() + .value(row) + .to_string(), + DataType::Int32 => column + .as_any() + .downcast_ref::() + .unwrap() + .value(row) + .to_string(), + DataType::Int64 => column + .as_any() + .downcast_ref::() + .unwrap() + .value(row) + .to_string(), + DataType::Float32 => { + format!( + "{:.4}", + column + .as_any() + .downcast_ref::() + .unwrap() + .value(row) + ) + } + DataType::Float64 => { + format!( + "{:.4}", + column + .as_any() + .downcast_ref::() + .unwrap() + .value(row) + ) + } + DataType::Boolean => column + .as_any() + .downcast_ref::() + .unwrap() + .value(row) + .to_string(), + _ => "[unsupported]".to_string(), + } + } + + pub async fn run_async(&mut self) -> io::Result<()> { + println!("Function Stream SQL Interface"); + println!("Server: {}\n", self.server_address()); + + if let Err(e) = self.connect().await { + eprintln!("Error: {}", e); + return 
Ok(()); + } + + loop { + let input = match self.read_sql_input() { + Ok(sql) => sql, + Err(ReadlineError::Interrupted) => continue, + Err(ReadlineError::Eof) => break, + Err(e) => { + eprintln!("Read Error: {}", e); + break; + } + }; + + if input.trim().is_empty() { + continue; + } + + match input.trim().to_lowercase().as_str() { + "exit" | "quit" | "q" => break, + "help" | "h" => self.print_help(), + _ => { + if let Err(e) = self.execute_sql(&input).await { + eprintln!("SQL Execution Error: {}", e); + } + } + } + println!(); + } + + if let Some(ref mut ed) = self.editor { + let _ = ed.save_history(".function-stream-cli-history"); + } + Ok(()) + } + + fn read_sql_input(&mut self) -> Result { + let mut lines = Vec::new(); + loop { + let prompt = if lines.is_empty() { "sql> " } else { " -> " }; + + let line = match self.editor.as_mut() { + Some(ed) => { + let l = ed.readline(prompt)?; + if !l.trim().is_empty() { + ed.add_history_entry(l.as_str()).ok(); + } + l + } + None => { + print!("{}", prompt); + io::stdout().flush().unwrap(); + let mut l = String::new(); + io::stdin().read_line(&mut l).unwrap(); + l + } + }; + + lines.push(line); + let trimmed = lines.last().map(|s| s.trim()).unwrap_or(""); + + if trimmed.ends_with(';') || self.is_balanced(&lines.join(" ")) { + return Ok(lines.join(" ").trim().to_string()); + } + } + } + + fn is_balanced(&self, sql: &str) -> bool { + let open = sql.chars().filter(|&c| c == '(').count(); + let close = sql.chars().filter(|&c| c == ')').count(); + open == close && !sql.trim().is_empty() + } + + fn print_help(&self) { + let mut table = Table::new(); + table + .load_preset(UTF8_FULL) + .set_content_arrangement(ContentArrangement::Dynamic) + .set_style(TableComponent::LeftHeaderIntersection, '├') + .set_style(TableComponent::HeaderLines, '─') + .set_style(TableComponent::MiddleHeaderIntersections, '┼') + .set_style(TableComponent::RightHeaderIntersection, '┤') + .set_header( + vec!["Command", "Usage"] + .into_iter() + .map(|s| 
Cell::new(s).fg(Color::Cyan).add_attribute(Attribute::Bold)) + .collect::>(), + ) + .add_row(vec!["HELP", "Show this message"]) + .add_row(vec!["EXIT", "Close connection"]); + println!("{}", table); + } +} diff --git a/clients/gofs/api.go b/clients/gofs/api.go deleted file mode 100644 index 6ced2b8b..00000000 --- a/clients/gofs/api.go +++ /dev/null @@ -1,156 +0,0 @@ -/* - * Copyright 2024 DefFunction Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package gofs - -import "context" - -type FunctionContext interface { - context.Context - GetState(ctx context.Context, key string) ([]byte, error) - PutState(ctx context.Context, key string, value []byte) error - Write(ctx context.Context, rawEvent Event[[]byte]) error - Read(ctx context.Context) (Event[[]byte], error) - GetConfig(ctx context.Context) (map[string]string, error) -} - -type Event[T any] interface { - Data() *T - Ack(ctx context.Context) error -} - -type BaseModule interface { - Init(ctx FunctionContext) error -} - -type Function[I any, O any] interface { - BaseModule - Handle(ctx FunctionContext, event Event[I]) (Event[O], error) -} - -type Source[O any] interface { - BaseModule - Handle(ctx FunctionContext, emit func(context.Context, Event[O]) error) error -} - -type Sink[I any] interface { - BaseModule - Handle(ctx FunctionContext, event Event[I]) error -} - -type Custom interface { - BaseModule - Handle(ctx FunctionContext) error -} - -type eventImpl[T any] struct { - data *T 
- ackFunc func(context.Context) error -} - -func NewEvent[T any](data *T) Event[T] { - return NewEventWithAck(data, nil) -} - -func NewEventWithAck[T any](data *T, ack func(ctx context.Context) error) Event[T] { - return &eventImpl[T]{ - data: data, - ackFunc: ack, - } -} - -func (e *eventImpl[T]) Data() *T { - return e.data -} - -func (e *eventImpl[T]) Ack(ctx context.Context) error { - if e.ackFunc != nil { - return e.ackFunc(ctx) - } - return nil -} - -type simpleFunction[I any, O any] struct { - handle func(ctx FunctionContext, event Event[I]) (Event[O], error) -} - -func NewSimpleFunction[I any, O any](handle func(ctx FunctionContext, event Event[I]) (Event[O], error)) Function[I, O] { - return &simpleFunction[I, O]{ - handle: handle, - } -} - -func (f *simpleFunction[I, O]) Init(_ FunctionContext) error { - return nil -} - -func (f *simpleFunction[I, O]) Handle(ctx FunctionContext, event Event[I]) (Event[O], error) { - return f.handle(ctx, event) -} - -type simpleSource[O any] struct { - handle func(ctx FunctionContext, emit func(context.Context, Event[O]) error) error -} - -func NewSimpleSource[O any](handle func(ctx FunctionContext, emit func(context.Context, Event[O]) error) error) Source[O] { - return &simpleSource[O]{ - handle: handle, - } -} - -func (s *simpleSource[O]) Init(_ FunctionContext) error { - return nil -} - -func (s *simpleSource[O]) Handle(ctx FunctionContext, emit func(context.Context, Event[O]) error) error { - return s.handle(ctx, emit) -} - -type simpleSink[I any] struct { - handle func(ctx FunctionContext, event Event[I]) error -} - -func NewSimpleSink[I any](handle func(ctx FunctionContext, event Event[I]) error) Sink[I] { - return &simpleSink[I]{ - handle: handle, - } -} - -func (s *simpleSink[I]) Init(_ FunctionContext) error { - return nil -} - -func (s *simpleSink[I]) Handle(ctx FunctionContext, event Event[I]) error { - return s.handle(ctx, event) -} - -type simpleCustom struct { - handle func(ctx FunctionContext) error -} - 
-func NewSimpleCustom(handle func(ctx FunctionContext) error) Custom { - return &simpleCustom{ - handle: handle, - } -} - -func (c *simpleCustom) Init(_ FunctionContext) error { - return nil -} - -func (c *simpleCustom) Handle(ctx FunctionContext) error { - return c.handle(ctx) -} diff --git a/clients/gofs/gofs.go b/clients/gofs/gofs.go deleted file mode 100644 index b17279d7..00000000 --- a/clients/gofs/gofs.go +++ /dev/null @@ -1,320 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package gofs - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "os" - "sync" - "time" - - "github.com/wirelessr/avroschema" -) - -const ( - StateInit int32 = iota - StateRunning -) - -const ( - FSSocketPath = "FS_SOCKET_PATH" - FSFunctionName = "FS_FUNCTION_NAME" - FSModuleName = "FS_MODULE_NAME" - DefaultModule = "default" -) - -var ( - ErrRegisterModuleDuringRunning = fmt.Errorf("cannot register module during running") - ErrAlreadyRunning = fmt.Errorf("already running") -) - -type FSClient interface { - error - Register(module string, wrapper *moduleWrapper) FSClient - Run() error -} - -type fsClient struct { - rpc *fsRPCClient - modules map[string]*moduleWrapper - state int32 - registerMu sync.Mutex - err error -} - -func NewFSClient() FSClient { - return &fsClient{ - modules: make(map[string]*moduleWrapper), - state: StateInit, - } -} - -type moduleWrapper struct { - *fsClient - ctx *functionContextImpl - processFunc func(FunctionContext, []byte) ([]byte, error) // Only for Wasm Function - executeFunc func(FunctionContext) error - initFunc func(FunctionContext) error - registerErr error -} - -func (m *moduleWrapper) AddInitFunc(initFunc func(FunctionContext) error) *moduleWrapper { - parentInit := m.initFunc - if parentInit != nil { - m.initFunc = func(ctx FunctionContext) error { - err := parentInit(ctx) - if err != nil { - return err - } - return initFunc(ctx) - } - } else { - m.initFunc = initFunc - } - return m -} - -func (c *fsClient) Register(module string, wrapper *moduleWrapper) FSClient { - if c.err != nil { - return c - } - c.registerMu.Lock() - defer c.registerMu.Unlock() - if c.state == StateRunning { - c.err = ErrRegisterModuleDuringRunning - return c - } - if wrapper.registerErr != nil { - c.err = wrapper.registerErr - return c - } - c.modules[module] = wrapper - return c -} - -func WithFunction[I any, O any](function Function[I, O]) *moduleWrapper { - m := &moduleWrapper{} - processFunc := func(ctx FunctionContext, payload 
[]byte) ([]byte, error) { // This is only for the wasm function - input := new(I) - err := json.Unmarshal(payload, input) - if err != nil { - return nil, fmt.Errorf("failed to parse JSON: %w", err) - } - output, err := function.Handle(ctx, NewEvent(input)) - if err != nil { - return nil, err - } - outputPayload, err := json.Marshal(output.Data()) - if err != nil { - return nil, fmt.Errorf("failed to marshal JSON: %w", err) - } - return outputPayload, nil - } - m.initFunc = func(ctx FunctionContext) error { - outputSchema, err := avroschema.Reflect(new(O)) - if err != nil { - return err - } - err = m.rpc.RegisterSchema(ctx, outputSchema) - if err != nil { - return fmt.Errorf("failed to register schema: %w", err) - } - return function.Init(ctx) - } - m.executeFunc = func(ctx FunctionContext) error { - for { - inputPayload, err := ctx.Read(ctx) - if err != nil { - _, _ = fmt.Fprintf(os.Stderr, "Failed to read: %s\n", err) - time.Sleep(3 * time.Second) - continue - } - input := new(I) - err = json.Unmarshal(*inputPayload.Data(), input) - if err != nil { - return fmt.Errorf("failed to parse JSON: %w", err) - } - output, err := function.Handle(ctx, NewEventWithAck(input, inputPayload.Ack)) - if err != nil { - return err - } - outputPayload, err := json.Marshal(output.Data()) - if err != nil { - return fmt.Errorf("failed to marshal JSON: %w", err) - } - err = ctx.Write(ctx, NewEventWithAck(&outputPayload, func(ctx context.Context) error { - return errors.Join(inputPayload.Ack(ctx), output.Ack(ctx)) - })) - if err != nil { - return err - } - } - } - m.processFunc = processFunc - return m -} - -func WithSource[O any](source Source[O]) *moduleWrapper { - m := &moduleWrapper{} - emit := func(ctx context.Context, event Event[O]) error { - outputPayload, _ := json.Marshal(event.Data()) - return m.ctx.Write(ctx, NewEventWithAck(&outputPayload, event.Ack)) - } - m.initFunc = func(ctx FunctionContext) error { - outputSchema, err := avroschema.Reflect(new(O)) - if err != nil { - 
return err - } - err = m.rpc.RegisterSchema(ctx, outputSchema) - if err != nil { - return fmt.Errorf("failed to register schema: %w", err) - } - return source.Init(ctx) - } - m.executeFunc = func(ctx FunctionContext) error { - return source.Handle(ctx, emit) - } - return m -} - -func WithSink[I any](sink Sink[I]) *moduleWrapper { - m := &moduleWrapper{} - m.initFunc = func(ctx FunctionContext) error { - inputSchema, err := avroschema.Reflect(new(I)) - if err != nil { - return err - } - err = m.rpc.RegisterSchema(ctx, inputSchema) - if err != nil { - return fmt.Errorf("failed to register schema: %w", err) - } - return sink.Init(ctx) - } - m.executeFunc = func(ctx FunctionContext) error { - for { - inputPayload, err := ctx.Read(ctx) - if err != nil { - _, _ = fmt.Fprintf(os.Stderr, "Failed to read: %s\n", err) - time.Sleep(3 * time.Second) - continue - } - input := new(I) - err = json.Unmarshal(*inputPayload.Data(), input) - if err != nil { - return fmt.Errorf("failed to parse JSON: %w", err) - } - if err = sink.Handle(ctx, NewEventWithAck(input, inputPayload.Ack)); err != nil { - return err - } - } - } - return m -} - -func WithCustom(custom Custom) *moduleWrapper { - return &moduleWrapper{ - initFunc: func(ctx FunctionContext) error { - return custom.Init(ctx) - }, - executeFunc: func(ctx FunctionContext) error { - return custom.Handle(ctx) - }, - } -} - -type functionContextImpl struct { - context.Context - c *fsClient - name string - module string -} - -func (c *functionContextImpl) GetState(ctx context.Context, key string) ([]byte, error) { - return c.c.rpc.GetState(c.warpContext(ctx), key) -} - -func (c *functionContextImpl) PutState(ctx context.Context, key string, value []byte) error { - return c.c.rpc.PutState(c.warpContext(ctx), key, value) -} - -func (c *functionContextImpl) Write(ctx context.Context, rawEvent Event[[]byte]) error { - return c.c.rpc.Write(c.warpContext(ctx), rawEvent) -} - -func (c *functionContextImpl) Read(ctx context.Context) 
(Event[[]byte], error) { - return c.c.rpc.Read(c.warpContext(ctx)) -} - -func (c *functionContextImpl) GetConfig(ctx context.Context) (map[string]string, error) { - return c.c.rpc.GetConfig(c.warpContext(ctx)) -} - -type funcCtxKey struct{} - -func (c *fsClient) Run() error { - if c.err != nil { - return c.err - } - c.registerMu.Lock() - if c.state == StateRunning { - c.registerMu.Unlock() - return ErrAlreadyRunning - } - c.state = StateRunning - c.registerMu.Unlock() - - funcName := os.Getenv(FSFunctionName) - if funcName == "" { - return fmt.Errorf("%s is not set", FSFunctionName) - } - module := os.Getenv(FSModuleName) - if module == "" { - module = DefaultModule - } - m, ok := c.modules[module] - if !ok { - return fmt.Errorf("module %s not found", module) - } - funcCtx := &functionContextImpl{c: c, name: funcName, module: module} - if c.rpc == nil { - rpc, err := newFSRPCClient() - if err != nil { - return err - } - c.rpc = rpc - } - ctx := funcCtx.warpContext(context.WithValue(context.Background(), funcCtxKey{}, funcCtx)) - funcCtx.Context = ctx - m.fsClient = c - m.ctx = funcCtx - err := m.initFunc(funcCtx) - if err != nil { - return err - } - c.rpc.loadModule(m) - if c.rpc.skipExecuting() { - return nil - } - return m.executeFunc(funcCtx) -} - -func (c *fsClient) Error() string { - return c.err.Error() -} diff --git a/clients/gofs/gofs_socket.go b/clients/gofs/gofs_socket.go deleted file mode 100644 index a474e6a2..00000000 --- a/clients/gofs/gofs_socket.go +++ /dev/null @@ -1,156 +0,0 @@ -//go:build !wasi -// +build !wasi - -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package gofs - -import ( - "context" - "fmt" - "os" - "strings" - - "google.golang.org/grpc/metadata" - - "github.com/functionstream/function-stream/fs/runtime/external/model" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" -) - -func (c *functionContextImpl) warpContext(parent context.Context) context.Context { - return metadata.NewOutgoingContext(parent, metadata.New(map[string]string{ - "name": c.name, - })) -} - -func passMetadataContext(from context.Context, to context.Context) context.Context { - md, ok := metadata.FromOutgoingContext(from) - if ok { - return metadata.NewOutgoingContext(to, md) - } - return to -} - -type fsRPCClient struct { - grpcCli model.FunctionClient -} - -func newFSRPCClient() (*fsRPCClient, error) { - socketPath := os.Getenv(FSSocketPath) - if socketPath == "" { - return nil, fmt.Errorf("%s is not set", FSSocketPath) - } - - serviceConfig := `{ - "methodConfig": [{ - "name": [{"service": "*"}], - "retryPolicy": { - "maxAttempts": 30, - "initialBackoff": "0.1s", - "maxBackoff": "30s", - "backoffMultiplier": 2, - "retryableStatusCodes": ["UNAVAILABLE"] - } - }] - }` - conn, err := grpc.NewClient( - "unix:"+socketPath, - grpc.WithTransportCredentials(insecure.NewCredentials()), - grpc.WithDefaultServiceConfig(serviceConfig), - ) - if err != nil { - return nil, err - } - client := model.NewFunctionClient(conn) - return &fsRPCClient{grpcCli: client}, nil -} - -func (c *fsRPCClient) RegisterSchema(ctx context.Context, schema string) error { - _, err := c.grpcCli.RegisterSchema(ctx, 
&model.RegisterSchemaRequest{Schema: schema}) - if err != nil { - return fmt.Errorf("failed to register schema: %w", err) - } - return nil -} - -func (c *fsRPCClient) Write(ctx context.Context, event Event[[]byte]) error { - _, err := c.grpcCli.Write(ctx, &model.Event{Payload: *event.Data()}) - if err != nil { - return fmt.Errorf("failed to write: %w", err) - } - return event.Ack(ctx) -} - -func (c *fsRPCClient) Read(ctx context.Context) (Event[[]byte], error) { - res, err := c.grpcCli.Read(ctx, &model.ReadRequest{}) - if err != nil { - return nil, fmt.Errorf("failed to read: %w", err) - } - return NewEventWithAck(&res.Payload, func(ackCtx context.Context) error { - if _, err := c.grpcCli.Ack(passMetadataContext(ctx, ackCtx), &model.AckRequest{ - Id: res.Id, - }); err != nil { - return err - } - return nil - }), nil -} - -func (c *fsRPCClient) PutState(ctx context.Context, key string, value []byte) error { - _, err := c.grpcCli.PutState(ctx, &model.PutStateRequest{Key: key, Value: value}) - if err != nil { - return err - } - return nil -} - -func (c *fsRPCClient) GetState(ctx context.Context, key string) ([]byte, error) { - res, err := c.grpcCli.GetState(ctx, &model.GetStateRequest{Key: key}) - if err != nil { - return nil, err - } - return res.Value, nil -} - -func (c *fsRPCClient) ListStates(ctx context.Context, path string) ([]string, error) { - path = strings.TrimSuffix(path, "/") - startInclusive := path + "/" - endExclusive := path + "//" - res, err := c.grpcCli.ListStates(ctx, &model.ListStatesRequest{StartInclusive: startInclusive, - EndExclusive: endExclusive}) - if err != nil { - return nil, err - } - return res.Keys, nil -} - -func (c *fsRPCClient) GetConfig(ctx context.Context) (map[string]string, error) { - res, err := c.grpcCli.GetConfig(ctx, &model.GetConfigRequest{}) - if err != nil { - return nil, err - } - return res.Config, nil -} - -func (c *fsRPCClient) loadModule(_ *moduleWrapper) { - // no-op -} - -func (c *fsRPCClient) skipExecuting() bool { 
- return false -} diff --git a/clients/gofs/gofs_wasmfs.go b/clients/gofs/gofs_wasmfs.go deleted file mode 100644 index 7404b8a9..00000000 --- a/clients/gofs/gofs_wasmfs.go +++ /dev/null @@ -1,117 +0,0 @@ -//go:build wasi -// +build wasi - -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package gofs - -import "C" -import ( - "context" - "fmt" - "os" - "syscall" -) - -var processFd int -var registerSchemaFd int - -func init() { - processFd, _ = syscall.Open("/process", syscall.O_RDWR, 0) - registerSchemaFd, _ = syscall.Open("/registerSchema", syscall.O_RDWR, 0) -} - -var runningModule *moduleWrapper - -//export process -func process() { - if runningModule == nil { - panic("no module loaded") - } - err := runningModule.executeFunc(runningModule.ctx) - if err != nil { - fmt.Fprintf(os.Stderr, "error: %v\n", err) - } -} - -func (c *functionContextImpl) warpContext(parent context.Context) context.Context { - return parent -} - -type fsRPCClient struct { -} - -func newFSRPCClient() (*fsRPCClient, error) { - return &fsRPCClient{}, nil -} - -func (c *fsRPCClient) RegisterSchema(ctx context.Context, schema string) error { - _, err := syscall.Write(registerSchemaFd, []byte(schema)) - if err != nil { - return fmt.Errorf("failed to register schema: %w", err) - } - return nil -} - -func (c *fsRPCClient) Write(ctx context.Context, event Event[[]byte]) error { - panic("rpc write not supported") -} - -func (c 
*fsRPCClient) Read(ctx context.Context) (Event[[]byte], error) { - panic("rpc read not supported") -} - -func (c *fsRPCClient) GetState(ctx context.Context, key string) ([]byte, error) { - panic("rpc get state not supported") -} - -func (c *fsRPCClient) PutState(ctx context.Context, key string, value []byte) error { - panic("rpc put state not supported") -} - -func (c *fsRPCClient) GetConfig(ctx context.Context) (map[string]string, error) { - panic("rpc get config not supported") -} - -func (c *fsRPCClient) loadModule(m *moduleWrapper) { - if m.processFunc == nil { - panic("only function module is supported for the wasm runtime") - } - m.executeFunc = func(ctx FunctionContext) error { - var stat syscall.Stat_t - syscall.Fstat(processFd, &stat) - payload := make([]byte, stat.Size) - _, err := syscall.Read(processFd, payload) - if err != nil { - return fmt.Errorf("failed to read: %w", err) - } - outputPayload, err := m.processFunc(ctx, payload) - if err != nil { - return fmt.Errorf("failed to process: %w", err) - } - _, err = syscall.Write(processFd, outputPayload) - if err != nil { - return fmt.Errorf("failed to write: %w", err) - } - return nil - } - runningModule = m -} - -func (c *fsRPCClient) skipExecuting() bool { - return true -} diff --git a/cmd/client/cmd.go b/cmd/client/cmd.go deleted file mode 100644 index 222b09b4..00000000 --- a/cmd/client/cmd.go +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package client - -import ( - c "github.com/functionstream/function-stream/cmd/client/common" - "github.com/functionstream/function-stream/cmd/client/consume" - "github.com/functionstream/function-stream/cmd/client/create" - del "github.com/functionstream/function-stream/cmd/client/delete" - "github.com/functionstream/function-stream/cmd/client/list" - "github.com/functionstream/function-stream/cmd/client/produce" - "github.com/functionstream/function-stream/cmd/client/reload" - "github.com/spf13/cobra" -) - -var ( - Cmd = &cobra.Command{ - Use: "client", - Short: "Function Stream Client Tool", - Long: `Operations to manage functions in a function stream server`, - } -) - -func init() { - Cmd.PersistentFlags().StringVarP(&c.Config.ServiceAddr, "service-address", "s", - "http://localhost:7300", "Service address") - - Cmd.AddCommand(create.Cmd) - Cmd.AddCommand(list.Cmd) - Cmd.AddCommand(del.Cmd) - Cmd.AddCommand(produce.Cmd) - Cmd.AddCommand(consume.Cmd) - Cmd.AddCommand(reload.Cmd) -} diff --git a/cmd/client/common/config.go b/cmd/client/common/config.go deleted file mode 100644 index 5488fd32..00000000 --- a/cmd/client/common/config.go +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package common - -type ClientConfig struct { - ServiceAddr string -} - -var Config ClientConfig diff --git a/cmd/client/consume/cmd.go b/cmd/client/consume/cmd.go deleted file mode 100644 index 3bc4c430..00000000 --- a/cmd/client/consume/cmd.go +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package consume - -import ( - "fmt" - "os" - - adminclient "github.com/functionstream/function-stream/admin/client" - "github.com/functionstream/function-stream/cmd/client/common" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -var ( - config = flags{} -) - -type flags struct { - name string -} - -var Cmd = &cobra.Command{ - Use: "consume", - Short: "Consume an event", - Long: `Consume an event from a queue`, - Args: cobra.NoArgs, - Run: exec, -} - -func init() { - Cmd.Flags().StringVarP(&config.name, "name", "n", "", "The name of the queue") - Cmd.MarkFlagsRequiredTogether("name") -} - -func exec(_ *cobra.Command, _ []string) { - cfg := adminclient.NewConfiguration() - cfg.Servers = []adminclient.ServerConfiguration{{ - URL: common.Config.ServiceAddr, - }} - cli := adminclient.NewAPIClient(cfg) - - e, res, err := cli.TubeAPI.ConsumeMessage(context.Background(), config.name).Execute() - if err != nil { - fmt.Printf("Failed to consume event: %v\n", err) - os.Exit(1) - } - if res.StatusCode != 200 { - fmt.Printf("Failed to consume event: %v\n", res.Status) - os.Exit(1) 
- } - fmt.Printf("%s\n", e) -} diff --git a/cmd/client/create/cmd.go b/cmd/client/create/cmd.go deleted file mode 100644 index 37d84cfa..00000000 --- a/cmd/client/create/cmd.go +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package create - -import ( - "context" - "fmt" - "io" - "os" - - adminclient "github.com/functionstream/function-stream/admin/client" - "github.com/functionstream/function-stream/admin/utils" - "github.com/functionstream/function-stream/cmd/client/common" - fs_cmmon "github.com/functionstream/function-stream/common" - "github.com/spf13/cobra" -) - -var ( - config = flags{} -) - -type flags struct { - name string - archive string - inputs []string - output string - replica int32 -} - -var Cmd = &cobra.Command{ - Use: "create", - Short: "Create Function", - Long: `Create a function on the function stream server`, - Args: cobra.NoArgs, - Run: exec, -} - -func init() { - Cmd.Flags().StringVarP(&config.name, "name", "n", "", "The name of the function") - Cmd.Flags().StringVarP(&config.archive, "archive", "a", "", "The archive path of the function") - Cmd.Flags().StringSliceVarP(&config.inputs, "inputs", "i", []string{}, "The inputs of the function") - Cmd.Flags().StringVarP(&config.output, "output", "o", "", "The output of the function") - Cmd.Flags().Int32VarP(&config.replica, "replica", "r", 1, "The replica of the function") - - 
Cmd.MarkFlagsRequiredTogether("name") -} - -func exec(_ *cobra.Command, _ []string) { - cfg := adminclient.NewConfiguration() - cfg.Servers = []adminclient.ServerConfiguration{{ - URL: common.Config.ServiceAddr, - }} - cli := adminclient.NewAPIClient(cfg) - f := adminclient.ModelFunction{ - Name: config.name, - Runtime: adminclient.ModelRuntimeConfig{ - Type: fs_cmmon.WASMRuntime, - Config: map[string]interface{}{ - fs_cmmon.RuntimeArchiveConfigKey: config.archive, - }}, - Source: utils.MakeMemorySourceTubeConfig(config.inputs...), - Sink: *utils.MakeMemorySinkTubeConfig(config.output), - Replicas: config.replica, - } - - res, err := cli.FunctionAPI.CreateFunction(context.Background()).Body(f).Execute() - if err != nil { - if res != nil { - body, e := io.ReadAll(res.Body) - if e != nil { - fmt.Printf("Failed to create function: %v\n", err) - } else { - fmt.Printf("Failed to create function: %v, %s\n", err, string(body)) - } - } else { - fmt.Printf("Failed to create function: %v\n", err) - } - os.Exit(1) - } - if res.StatusCode != 200 { - fmt.Printf("Failed to create function with status code: %d\n", res.StatusCode) - os.Exit(1) - } -} diff --git a/cmd/client/delete/cmd.go b/cmd/client/delete/cmd.go deleted file mode 100644 index 095a7ba5..00000000 --- a/cmd/client/delete/cmd.go +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package del - -import ( - adminclient "github.com/functionstream/function-stream/admin/client" - "github.com/functionstream/function-stream/cmd/client/common" - "github.com/spf13/cobra" -) - -var ( - config = flags{} -) - -type flags struct { - name string -} - -var Cmd = &cobra.Command{ - Use: "delete", - Short: "Delete Function", - Long: `Delete a function on the function stream server`, - Args: cobra.NoArgs, - Run: exec, -} - -func init() { - Cmd.Flags().StringVarP(&config.name, "name", "n", "", "The name of the function") - Cmd.MarkFlagsRequiredTogether("name") -} - -func exec(_ *cobra.Command, _ []string) { - cfg := adminclient.NewConfiguration() - cfg.Servers = []adminclient.ServerConfiguration{{ - URL: common.Config.ServiceAddr, - }} - _ = adminclient.NewAPIClient(cfg) - - //res, err := cli.DefaultAPI.ApiV1FunctionFunctionNameDelete(context.Background(), config.name).Execute() - //if err != nil { - // fmt.Printf("Failed to delete function: %v\n", err) - // os.Exit(1) - //} - //if res.StatusCode != 200 { - // fmt.Printf("Failed to delete function with status code: %d\n", res.StatusCode) - // os.Exit(1) - //} -} diff --git a/cmd/client/list/cmd.go b/cmd/client/list/cmd.go deleted file mode 100644 index b6a617e9..00000000 --- a/cmd/client/list/cmd.go +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package list - -import ( - "context" - "fmt" - "os" - - adminclient "github.com/functionstream/function-stream/admin/client" - "github.com/functionstream/function-stream/cmd/client/common" - "github.com/spf13/cobra" -) - -var Cmd = &cobra.Command{ - Use: "list", - Short: "List All Functions", - Long: `List all functions on the function stream server`, - Args: cobra.NoArgs, - Run: exec, -} - -func exec(_ *cobra.Command, _ []string) { - cfg := adminclient.NewConfiguration() - cfg.Servers = []adminclient.ServerConfiguration{{ - URL: common.Config.ServiceAddr, - }} - cli := adminclient.NewAPIClient(cfg) - - list, res, err := cli.FunctionAPI.GetAllFunctions(context.Background()).Execute() - if err != nil { - fmt.Printf("Failed to list functions: %v\n", err) - os.Exit(1) - } - if res.StatusCode != 200 { - fmt.Printf("Failed to list functions with status code: %d\n", res.StatusCode) - os.Exit(1) - } - for _, f := range list { - fmt.Println(f) - } -} diff --git a/cmd/client/produce/cmd.go b/cmd/client/produce/cmd.go deleted file mode 100644 index 59c0ff92..00000000 --- a/cmd/client/produce/cmd.go +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package produce - -import ( - "fmt" - "os" - - adminclient "github.com/functionstream/function-stream/admin/client" - "github.com/functionstream/function-stream/cmd/client/common" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -var ( - config = flags{} -) - -type flags struct { - name string - content string -} - -var Cmd = &cobra.Command{ - Use: "produce", - Short: "Produce an event", - Long: `Produce an event to a queue`, - Args: cobra.NoArgs, - Run: exec, -} - -func init() { - Cmd.Flags().StringVarP(&config.name, "name", "n", "", "The name of the queue") - Cmd.Flags().StringVarP(&config.content, "content", "c", "", "The content of the event") - Cmd.MarkFlagsRequiredTogether("name", "content") -} - -func exec(_ *cobra.Command, _ []string) { - cfg := adminclient.NewConfiguration() - cfg.Servers = []adminclient.ServerConfiguration{{ - URL: common.Config.ServiceAddr, - }} - cli := adminclient.NewAPIClient(cfg) - - res, err := cli.TubeAPI.ProduceMessage(context.Background(), config.name).Body(config.content).Execute() - if err != nil { - fmt.Printf("Failed to produce event: %v\n", err) - os.Exit(1) - } - if res.StatusCode != 200 { - fmt.Printf("Failed to produce event: %v\n", res.Status) - os.Exit(1) - } - fmt.Println("Event produced") -} diff --git a/cmd/client/reload/cmd.go b/cmd/client/reload/cmd.go deleted file mode 100644 index 1762ab67..00000000 --- a/cmd/client/reload/cmd.go +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package reload - -import ( - "context" - "fmt" - "os" - - adminclient "github.com/functionstream/function-stream/admin/client" - "github.com/functionstream/function-stream/cmd/client/common" - "github.com/spf13/cobra" -) - -var Cmd = &cobra.Command{ - Use: "reload", - Short: "Reload Functions", - Long: `Reload functions from the function store`, - Args: cobra.NoArgs, - Run: exec, -} - -func exec(_ *cobra.Command, _ []string) { - cfg := adminclient.NewConfiguration() - cfg.Servers = []adminclient.ServerConfiguration{{ - URL: common.Config.ServiceAddr, - }} - cli := adminclient.NewAPIClient(cfg) - - res, err := cli.FunctionStoreAPI.ReloadFunctions(context.Background()).Execute() - if err != nil { - fmt.Printf("Failed to reload functions: %v\n", err) - os.Exit(1) - } - if res.StatusCode != 200 { - fmt.Printf("Failed to reload functions with status code: %d\n", res.StatusCode) - os.Exit(1) - } -} diff --git a/cmd/main.go b/cmd/main.go deleted file mode 100644 index 7f56fdfe..00000000 --- a/cmd/main.go +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package main - -import ( - "fmt" - "os" - - "github.com/functionstream/function-stream/cmd/client" - "github.com/functionstream/function-stream/cmd/perf" - "github.com/functionstream/function-stream/cmd/server" - "github.com/spf13/cobra" -) - -var ( - rootCmd = &cobra.Command{ - Use: "function-stream", - Short: "function-stream root command", - Long: `function-stream root command`, - } -) - -func init() { - rootCmd.AddCommand(server.Cmd) - rootCmd.AddCommand(client.Cmd) - rootCmd.AddCommand(perf.Cmd) -} - -func main() { - if err := rootCmd.Execute(); err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } -} diff --git a/cmd/perf/cmd.go b/cmd/perf/cmd.go deleted file mode 100644 index 84c055ea..00000000 --- a/cmd/perf/cmd.go +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package perf - -import ( - "context" - "io" - - "github.com/functionstream/function-stream/common" - "github.com/functionstream/function-stream/perf" - "github.com/spf13/cobra" -) - -var ( - Cmd = &cobra.Command{ - Use: "perf", - Short: "Function Stream perf client", - Long: `Tool for basic performance tests`, - Run: exec, - } - - config = &perf.Config{} -) - -func init() { - Cmd.Flags().StringVarP(&config.PulsarURL, "pulsar-url", "p", "pulsar://localhost:6650", "Pulsar URL") - Cmd.Flags().Float64VarP(&config.RequestRate, "rate", "r", 100.0, "Request rate, ops/s") -} - -func exec(*cobra.Command, []string) { - common.RunProcess(runPerf) -} - -type closer struct { - ctx context.Context - cancel context.CancelFunc -} - -func newCloser(ctx context.Context) *closer { - c := &closer{} - c.ctx, c.cancel = context.WithCancel(ctx) - return c -} - -func (c *closer) Close() error { - c.cancel() - return nil -} - -func runPerf() (io.Closer, error) { - closer := newCloser(context.Background()) - go perf.New(config).Run(closer.ctx) - return closer, nil -} diff --git a/cmd/server/cmd.go b/cmd/server/cmd.go deleted file mode 100644 index d8d69df2..00000000 --- a/cmd/server/cmd.go +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package server - -import ( - "context" - "io" - - "github.com/functionstream/function-stream/common" - "github.com/functionstream/function-stream/server" - "github.com/spf13/cobra" -) - -var ( - Cmd = &cobra.Command{ - Use: "server", - Short: "Start a server", - Long: `Start a server`, - Run: exec, - } -) - -type flags struct { - configFile string - loadConfigFromEnv bool -} - -var ( - config = flags{} -) - -func init() { - Cmd.Flags().StringVarP(&config.configFile, "config-file", "c", "conf/function-stream.yaml", - "path to the config file (default is conf/function-stream.yaml)") - Cmd.Flags().BoolVarP(&config.loadConfigFromEnv, "load-config-from-env", "e", false, - "load config from env (default is false)") -} - -func exec(*cobra.Command, []string) { - common.RunProcess(func() (io.Closer, error) { - var c *server.Config - var err error - if config.loadConfigFromEnv { - c, err = server.LoadConfigFromEnv() - if err != nil { - return nil, err - } - } else { - c, err = server.LoadConfigFromFile(config.configFile) - if err != nil { - return nil, err - } - } - s, err := server.NewServer( - server.WithTubeFactoryBuilders(server.GetBuiltinTubeFactoryBuilder()), - server.WithRuntimeFactoryBuilders(server.GetBuiltinRuntimeFactoryBuilder()), - server.WithConfig(c)) - if err != nil { - return nil, err - } - go s.Run(context.Background()) - return s, nil - }) -} diff --git a/common/buffer_reader.go b/common/buffer_reader.go deleted file mode 100644 index f317b1f4..00000000 --- a/common/buffer_reader.go +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package common - -import "io" - -type BufferReader struct { - buffer []byte -} - -func (r *BufferReader) Read(p []byte) (n int, err error) { - if len(r.buffer) == 0 { - return 0, io.EOF - } - - copied := copy(p[n:], r.buffer) - n += copied - r.buffer = r.buffer[copied:] - - return n, nil -} - -func (r *BufferReader) ResetBuffer(data []byte) { - r.buffer = data -} - -func NewChanReader() *BufferReader { - return &BufferReader{ - buffer: nil, - } -} diff --git a/common/buffer_reader_test.go b/common/buffer_reader_test.go deleted file mode 100644 index 498acdc3..00000000 --- a/common/buffer_reader_test.go +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package common - -import ( - "io" - "testing" -) - -func TestChanReader_Read_HappyPath(t *testing.T) { - reader := NewChanReader() - reader.ResetBuffer([]byte("Hello, world!")) - buffer := make([]byte, 13) - - n, err := reader.Read(buffer) - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - if n != 13 { - t.Errorf("Expected to read 13 bytes, but read %d", n) - } - - if string(buffer) != "Hello, world!" { - t.Errorf("Expected to read 'Hello, world!', but read '%s'", buffer) - } -} - -func TestChanReader_Read_EmptyChannel(t *testing.T) { - reader := NewChanReader() - reader.ResetBuffer([]byte("")) - buffer := make([]byte, 10) - - n, err := reader.Read(buffer) - if err != io.EOF { - t.Errorf("Expected error to be io.EOF, but got %v", err) - } - - if n != 0 { - t.Errorf("Expected to read 0 bytes, but read %d", n) - } -} - -func TestChanReader_Read_BufferSmallerThanData(t *testing.T) { - reader := NewChanReader() - reader.ResetBuffer([]byte("Hello, world!")) - buffer := make([]byte, 5) - - n, err := reader.Read(buffer) - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - if n != 5 { - t.Errorf("Expected to read 5 bytes, but read %d", n) - } - - if string(buffer) != "Hello" { - t.Errorf("Expected to read 'Hello', but read '%s'", buffer) - } -} - -func TestChanReader_Read_BufferLargerThanData(t *testing.T) { - reader := NewChanReader() - reader.ResetBuffer([]byte("Hello")) - buffer := make([]byte, 10) - - n, err := reader.Read(buffer) - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - if n != 5 { - t.Errorf("Expected to read 5 bytes, but read %d", n) - } - - if string(buffer[:n]) != "Hello" { - t.Errorf("Expected to read 'Hello', but read '%s'", buffer[:n]) - } -} diff --git a/common/buffer_writter.go b/common/buffer_writter.go deleted file mode 100644 index ff07b26e..00000000 --- a/common/buffer_writter.go +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package common - -type BufferWriter struct { - buffer []byte -} - -func (w *BufferWriter) Write(p []byte) (n int, err error) { - if w.buffer == nil { - w.buffer = make([]byte, 0) - } - w.buffer = append(w.buffer, p...) - return len(p), nil -} - -func (w *BufferWriter) GetAndReset() []byte { - result := w.buffer - w.buffer = nil - return result -} - -func NewChanWriter() *BufferWriter { - return &BufferWriter{} -} diff --git a/common/buffer_writter_test.go b/common/buffer_writter_test.go deleted file mode 100644 index fadefc92..00000000 --- a/common/buffer_writter_test.go +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package common - -import ( - "testing" -) - -func TestChanWriter_Write_HappyPath(t *testing.T) { - writer := NewChanWriter() - n, err := writer.Write([]byte("Hello, world!")) - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - if n != 13 { - t.Errorf("Expected to write 13 bytes, but wrote %d", n) - } - - data := writer.GetAndReset() - if string(data) != "Hello, world!" { - t.Errorf("Expected to write 'Hello, world!', but wrote '%s'", data) - } -} - -func TestChanWriter_Write_EmptyData(t *testing.T) { - writer := NewChanWriter() - n, err := writer.Write([]byte("")) - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - if n != 0 { - t.Errorf("Expected to write 0 bytes, but wrote %d", n) - } - - data := writer.GetAndReset() - if string(data) != "" { - t.Errorf("Expected to write '', but wrote '%s'", data) - } -} diff --git a/common/chan_utils.go b/common/chan_utils.go deleted file mode 100644 index af20a4b5..00000000 --- a/common/chan_utils.go +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package common - -import ( - "context" -) - -func SendToChannel[T any](ctx context.Context, c chan<- T, e interface{}) bool { - select { - case c <- e.(T): // It will panic if `e` is not of type `T` or a type that can be converted to `T`. 
- return true - case <-ctx.Done(): - close(c) - return false - } -} - -func zeroValue[T any]() T { - var v T - return v -} - -func ReceiveFromChannel[T any](ctx context.Context, c <-chan T) (T, bool) { - select { - case e := <-c: - return e, true - case <-ctx.Done(): - return zeroValue[T](), false - } -} diff --git a/common/chan_utils_test.go b/common/chan_utils_test.go deleted file mode 100644 index d631a203..00000000 --- a/common/chan_utils_test.go +++ /dev/null @@ -1,153 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package common - -import ( - "context" - "reflect" - "testing" - "time" -) - -func TestSendToChannel(t *testing.T) { - - t.Run("send_buffered_chan_success", func(t *testing.T) { - - c := make(chan string, 1) - ctx := context.Background() - if !SendToChannel(ctx, c, "data") { - t.Fatal("SendToChannel should return true when sending succeeds") - } - value := <-c - if value != "data" { - t.Errorf("expected to receive \"data\" from channel, but received %s", value) - } - - }) - - t.Run("send_unbuffered_chan_success", func(t *testing.T) { - c := make(chan string) - ctx := context.Background() - - go func() { - SendToChannel(ctx, c, "data") - }() - - value := <-c - if value != "data" { - t.Errorf("expected to receive \"data\" from channel, but received %s", value) - } - - }) - - t.Run("context_timeout", func(t *testing.T) { - // Using time.Sleep to simulating context timeout - - c := make(chan string) - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) - defer cancel() - time.Sleep(1 * time.Second) // Set timeout - if k := SendToChannel(ctx, c, "hello"); k { - t.Fatal("context timeout but data sent successfully") - } else { - t.Log("failed to send data due to context timeout") - } - - }) - - t.Run("incorrect_type", func(t *testing.T) { - - defer func() { - if r := recover(); r != nil { - t.Log("test-ok") - } else { - t.Log("test-fail") - } - }() - c := make(chan int) - ctx := context.Background() - SendToChannel(ctx, c, "incorrect type") - - }) -} - -func TestZeroValue(t *testing.T) { - - testZeroValue := func(name string, got, want interface{}) { - t.Run(name, func(t *testing.T) { - if !reflect.DeepEqual(got, want) { - t.Errorf("zeroValue() = %v, want %v", got, want) - } - }) - } - - testZeroValue("int", zeroValue[int](), 0) - testZeroValue("float64", zeroValue[float64](), float64(0)) - testZeroValue("string", zeroValue[string](), "") - testZeroValue("bool", zeroValue[bool](), false) - -} - -func TestReceiveFromChannel(t *testing.T) { 
- // Since SendToChannel has already been tested, only buffered chan will be considered here - - t.Run("Success", func(t *testing.T) { - ctx := context.Background() - ch := make(chan string, 1) - SendToChannel(ctx, ch, "test-data") - value, ok := ReceiveFromChannel(ctx, ch) - if ok { - t.Log("successfully received data") - } - if value != "test-data" { - t.Errorf("receive failed,expected value to be \"test-data\", but it's %s", value) - } - - }) - - t.Run("Timeout", func(t *testing.T) { - - ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) - defer cancel() - ch := make(chan string, 1) - time.Sleep(1 * time.Second) - // No need to send data to SendToChannel as the context has been set to expire - value, ok := ReceiveFromChannel(ctx, ch) - if ok { - t.Fatal("due to timeout setting, it is expected that no value will be received from the channel") - } - if value != "" { - t.Errorf("expected zero value for string, but it's %s", value) - } - - }) - - t.Run("Canceled", func(t *testing.T) { - - ctx, cancel := context.WithCancel(context.Background()) - cancel() // Cancel context - ch := make(chan string, 1) - value, ok := ReceiveFromChannel(ctx, ch) - if ok { - t.Fatal("expected no value to be received from channel due to context cancellation") - } - if value != "" { - t.Errorf("expected zero value for string, but it's %s", value) - } - - }) -} diff --git a/common/config/config.go b/common/config/config.go deleted file mode 100644 index 2367a4eb..00000000 --- a/common/config/config.go +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package config - -import ( - "encoding/json" - - "github.com/go-playground/validator/v10" -) - -// ConfigMap is a custom type that represents a map where keys are strings and values are of any type. -// Since Viper is not case-sensitive, we use '-' to separate words in all field names in the config map. -// This convention helps in maintaining consistency across different configurations and makes them easier to read. -// -// For example: -// - `socket-path` refers to the path of the socket. -// - `pulsar-url` refers to the URL of the Pulsar service. -type ConfigMap map[string]interface{} - -// MergeConfig merges multiple ConfigMap into one -func MergeConfig(configs ...ConfigMap) ConfigMap { - result := ConfigMap{} - for _, config := range configs { - for k, v := range config { - result[k] = v - } - } - return result -} - -func (c ConfigMap) ToConfigStruct(v any) error { - jsonData, err := json.Marshal(c) - if err != nil { - return err - } - if err := json.Unmarshal(jsonData, v); err != nil { - return err - } - validate := validator.New() - return validate.Struct(v) -} - -func ToConfigMap(v any) (ConfigMap, error) { - jsonData, err := json.Marshal(v) - if err != nil { - return nil, err - } - var result ConfigMap - if err := json.Unmarshal(jsonData, &result); err != nil { - return nil, err - } - return result, nil -} diff --git a/common/constants.go b/common/constants.go deleted file mode 100644 index d6150160..00000000 --- a/common/constants.go +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package common - -const ( - PulsarTubeType = "pulsar" - MemoryTubeType = "memory" - HttpTubeType = "http" - EmptyTubeType = "empty" - NatsTubeType = "nats" - - WASMRuntime = "wasm" - ExternalRuntime = "external" - - RuntimeArchiveConfigKey = "archive" - - StateStorePebble = "pebble" -) diff --git a/common/errors.go b/common/errors.go deleted file mode 100644 index 582049f2..00000000 --- a/common/errors.go +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package common - -import "fmt" - -var ( - ErrorFunctionNotFound = fmt.Errorf("function not found") - ErrorFunctionExists = fmt.Errorf("function already exists") - ErrorFunctionUnsupportedRuntime = fmt.Errorf("function does not support runtime") - ErrorRuntimeFactoryNotFound = fmt.Errorf("runtime factory not found") - ErrorTubeFactoryNotFound = fmt.Errorf("tube factory not found") - ErrorPackageNoSupportedRuntime = fmt.Errorf("package does not support any runtime") -) diff --git a/common/log.go b/common/log.go deleted file mode 100644 index 25afa9bf..00000000 --- a/common/log.go +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package common - -import ( - "context" - - "github.com/go-logr/logr" - "github.com/go-logr/zapr" - "go.uber.org/zap" -) - -const ( - DebugLevel int = -1 - InfoLevel int = 0 // Default log level - WarnLevel int = 1 -) - -type Logger struct { - *logr.Logger -} - -func NewDefaultLogger() *Logger { - zapLogger, err := zap.NewDevelopment() - if err != nil { - panic("failed to create zap logger:" + err.Error()) - } - zaprLog := zapr.NewLogger(zapLogger) - return NewLogger(&zaprLog) -} - -func NewLogger(logger *logr.Logger) *Logger { - return &Logger{logger} -} - -func (l *Logger) DebugEnabled() bool { - return l.GetV() <= DebugLevel -} - -func (l *Logger) Debug(msg string, keysAndValues ...interface{}) { - if l.DebugEnabled() { - l.V(DebugLevel).Info(msg, keysAndValues...) - } -} - -func (l *Logger) Warn(msg string, keysAndValues ...interface{}) { - l.V(WarnLevel).Info(msg, keysAndValues...) -} - -func (l *Logger) Info(msg string, keysAndValues ...interface{}) { - l.V(InfoLevel).Info(msg, keysAndValues...) -} - -func (l *Logger) SubLogger(keysAndValues ...any) *Logger { - internalLogger := l.WithValues(keysAndValues...) - return &Logger{&internalLogger} -} - -type loggerKey struct{} - -func WithLogger(ctx context.Context, logger *Logger) context.Context { - return context.WithValue(ctx, loggerKey{}, logger) -} - -func GetLogger(ctx context.Context) *Logger { - logger, ok := ctx.Value(loggerKey{}).(*Logger) - if !ok { - return NewDefaultLogger() - } - return logger -} diff --git a/common/model/function.go b/common/model/function.go deleted file mode 100644 index 95d8d019..00000000 --- a/common/model/function.go +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package model - -import ( - "strings" - - "github.com/functionstream/function-stream/common/config" - - "github.com/functionstream/function-stream/fs/contube" - "github.com/pkg/errors" -) - -type TubeConfig struct { - Type string `json:"type"` - Config contube.ConfigMap `json:"config,omitempty"` -} - -type RuntimeConfig struct { - Config config.ConfigMap `json:"config,omitempty"` - Type string `json:"type"` -} - -type Function struct { - Name string `json:"name"` - Namespace string `json:"namespace,omitempty"` - Package string `json:"package"` - Module string `json:"module"` - Runtime RuntimeConfig `json:"runtime"` - Sources []TubeConfig `json:"source"` - Sink TubeConfig `json:"sink"` - State config.ConfigMap `json:"state,omitempty"` - Config map[string]string `json:"config,omitempty"` - Replicas int32 `json:"replicas"` -} - -func (f *Function) Validate() error { - if f.Name == "" { - return errors.New("function name shouldn't be empty") - } - if strings.Contains(f.Name, "/") { - return errors.New("name should not contain '/'") - } - if strings.Contains(f.Namespace, "/") { - return errors.New("namespace should not contain '/'") - } - if len(f.Sources) == 0 { - return errors.New("sources should be configured") - } - if f.Replicas <= 0 { - return errors.New("replicas should be greater than 0") - } - return nil -} diff --git a/common/model/function_serde_test.go b/common/model/function_serde_test.go deleted file mode 100644 index f50dfca8..00000000 --- a/common/model/function_serde_test.go +++ /dev/null @@ -1,129 +0,0 @@ -/* - * Copyright 2024 
Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package model - -import ( - "encoding/json" - "fmt" - "reflect" - "testing" - - "gopkg.in/yaml.v3" -) - -func TestFunctionSerde(t *testing.T) { - f := Function{ - Name: "TestFunction", - Runtime: RuntimeConfig{Type: "runtime", Config: map[string]interface{}{"key": "value"}}, - Sources: []TubeConfig{{Type: "source", Config: map[string]interface{}{"key": "value"}}}, - Sink: TubeConfig{Type: "sink", Config: map[string]interface{}{"key": "value"}}, - State: map[string]interface{}{"key": "value"}, - Config: map[string]string{"key": "value"}, - Replicas: 2, - } - - // JSON Serialization - data, err := json.Marshal(f) - if err != nil { - t.Fatal("JSON Serialization error:", err) - } - - fmt.Println(string(data)) - - // JSON Deserialization - var f2 Function - err = json.Unmarshal(data, &f2) - if err != nil { - t.Fatal("JSON Deserialization error:", err) - } - - if !reflect.DeepEqual(f, f2) { - t.Error("JSON Deserialization does not match original") - } - - // YAML Serialization - data, err = yaml.Marshal(f) - if err != nil { - t.Fatal("YAML Serialization error:", err) - } - - fmt.Println(string(data)) - - // YAML Deserialization - err = yaml.Unmarshal(data, &f2) - if err != nil { - t.Fatal("YAML Deserialization error:", err) - } - - if !reflect.DeepEqual(f, f2) { - t.Error("YAML Deserialization does not match original") - } -} - -func TestFunctionSerdeWithNil(t *testing.T) { - f := 
Function{ - Name: "TestFunction", - Runtime: RuntimeConfig{Config: map[string]interface{}{}}, - Sources: []TubeConfig{}, - Sink: TubeConfig{Config: map[string]interface{}{}}, - State: map[string]interface{}{}, - Config: map[string]string{"key": "value"}, - Replicas: 2, - } - - // JSON Serialization - data, err := json.Marshal(f) - if err != nil { - t.Fatal("JSON Serialization error:", err) - } - - fmt.Println(string(data)) - - // JSON Deserialization - var f2 Function - err = json.Unmarshal(data, &f2) - if err != nil { - t.Fatal("JSON Deserialization error:", err) - } - - // TODO: We should override the MarshalJson for the Function - f2.Sink.Config = map[string]interface{}{} - f2.Runtime.Config = map[string]interface{}{} - f2.State = map[string]interface{}{} - - if !reflect.DeepEqual(f, f2) { - t.Error("JSON Deserialization does not match original") - } - - // YAML Serialization - data, err = yaml.Marshal(f) - if err != nil { - t.Fatal("YAML Serialization error:", err) - } - - fmt.Println(string(data)) - - // YAML Deserialization - err = yaml.Unmarshal(data, &f2) - if err != nil { - t.Fatal("YAML Deserialization error:", err) - } - - if !reflect.DeepEqual(f, f2) { - t.Error("YAML Deserialization does not match original") - } -} diff --git a/common/signal.go b/common/signal.go deleted file mode 100644 index 93315b86..00000000 --- a/common/signal.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2023 StreamNative, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package common - -import ( - "io" - "log/slog" - "os" - "os/signal" - "syscall" -) - -func WaitUntilSignal(closers ...io.Closer) { - c := make(chan os.Signal, 1) - signal.Notify(c, os.Interrupt, syscall.SIGTERM) - - sig := <-c - slog.Info( - "Received signal, exiting", - slog.String("signal", sig.String()), - ) - - code := 0 - for _, closer := range closers { - if err := closer.Close(); err != nil { - slog.Error( - "Failed when shutting down server", - slog.Any("error", err), - ) - code = 1 - } - } - - if code == 0 { - slog.Info("Shutdown Completed") - } - os.Exit(code) -} diff --git a/common/utils.go b/common/utils.go deleted file mode 100644 index d4abc6ec..00000000 --- a/common/utils.go +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package common - -import "log/slog" - -func OptionalStr(s string) *string { - return &s -} - -type expensive struct { - slog.LogValuer - f func() slog.Value -} - -func (e *expensive) LogValue() slog.Value { - return e.f() -} - -func Expensive(f func() slog.Value) slog.LogValuer { - e := &expensive{} - e.f = f - return e -} - -type logCounter struct { - slog.LogValuer - count int -} - -func (l *logCounter) LogValue() slog.Value { - l.count++ - return slog.IntValue(l.count) -} - -func LogCounter() slog.LogValuer { - return &logCounter{} -} - -type NamespacedName struct { - namespace string - name string -} - -func (n NamespacedName) String() string { - if n.namespace == "" { - return n.name - } - return n.namespace + "/" + n.name -} - -func GetNamespacedName(namespace, name string) NamespacedName { - return NamespacedName{ - namespace: namespace, - name: name, - } -} diff --git a/common/wasm_utils/wasm_utils.go b/common/wasm_utils/wasm_utils.go deleted file mode 100644 index 60f7d004..00000000 --- a/common/wasm_utils/wasm_utils.go +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package wasm_utils - -import "unsafe" - -// StringToPtr returns a pointer and size pair for the given string in a way -// compatible with WebAssembly numeric types. -// The returned pointer aliases the string hence the string must be kept alive -// until ptr is no longer needed. 
-func StringToPtr(s string) (uint32, uint32) { - ptr := unsafe.Pointer(unsafe.StringData(s)) - return uint32(uintptr(ptr)), uint32(len(s)) -} - -//// StringToLeakedPtr returns a pointer and size pair for the given string in a way -//// compatible with WebAssembly numeric types. -//// The pointer is not automatically managed by TinyGo hence it must be freed by the host. -//func StringToLeakedPtr(s string) (uint32, uint32) { -// size := C.ulong(len(s)) -// ptr := unsafe.Pointer(C.malloc(size)) -// copy(unsafe.Slice((*byte)(ptr), size), s) -// return uint32(uintptr(ptr)), uint32(size) -//} - -func PtrSize(ptr, size uint32) uint64 { - return (uint64(ptr) << 32) | uint64(size) -} - -func ExtractPtrSize(ptrSize uint64) (uint32, uint32) { - return uint32(ptrSize >> 32), uint32(ptrSize) -} diff --git a/conf/config.yaml b/conf/config.yaml new file mode 100644 index 00000000..5e61fc54 --- /dev/null +++ b/conf/config.yaml @@ -0,0 +1,119 @@ +# Function Stream Configuration +service: + service_id: "my-service-001" + service_name: "function-stream" + version: "1.0.0" + host: "127.0.0.1" + port: 8080 + # workers: null # If specified, overrides worker_multiplier + # worker_multiplier: 4 # CPU cores × multiplier (default: 4 if not specified) + debug: false + +logging: + level: info + format: json + file_path: logs/app.log # Log file path (directory will be created automatically) + max_file_size: 100 + max_files: 5 + +# Python Runtime Configuration +# Configuration for python wasm runtime parameters +python: + # Path to python wasm file + # Default: data/cache/python-runner/functionstream-python-runtime.wasm + wasm_path: data/cache/python-runner/functionstream-python-runtime.wasm + + # Pre-compiled component cache directory + # Default: data/cache/python-runner + cache_dir: data/cache/python-runner + + # Enable component caching + # If true, pre-compiled components will be cached to speed up subsequent loads + # Default: true + enable_cache: true + +# WASM Runtime Configuration +# 
Configuration for wasm incremental compilation cache parameters +wasm: + # Incremental compilation cache directory + # Default: .cache/wasm-incremental + cache_dir: .cache/wasm-incremental + + # Enable incremental compilation cache + # If true, compiled wasm modules will be cached to speed up subsequent compilations + # Default: true + enable_cache: true + + # Maximum cache size (bytes) + # Default: 100MB (104857600 bytes) + # When cache exceeds this size, least recently used items will be evicted + max_cache_size: 104857600 + +# State Storage Configuration +# Used to store runtime state data for tasks +state_storage: + # Storage type: memory or rocksdb (persistent) + # Default: memory + storage_type: rocksdb + + # Base directory path (only used when storage_type is rocksdb) + # Final path format: {base_dir}/state/{task_name}-{created_at} + # Example: if base_dir is "data", task name is "my_task", created_at is 1234567890 + # then full path is: data/state/my_task-1234567890 + # Default: data + base_dir: data + + # RocksDB configuration (only used when storage_type is rocksdb) + rocksdb: + # Maximum number of open files (optional) + # Default: None (uses RocksDB default) + max_open_files: 1000 + + # Write buffer size in bytes (optional) + # Default: None (uses RocksDB default) + # Example: 67108864 represents 64MB + write_buffer_size: 67108864 + + # Maximum number of write buffers (optional) + # Default: None (uses RocksDB default) + max_write_buffer_number: 3 + + # Target file size base in bytes (optional) + # Default: None (uses RocksDB default) + # Example: 67108864 represents 64MB + target_file_size_base: 67108864 + + # Maximum bytes for level base in bytes (optional) + # Default: None (uses RocksDB default) + # Example: 268435456 represents 256MB + max_bytes_for_level_base: 268435456 + +# Task Storage Configuration +# Used to store task metadata (task name, wasm bytes, config bytes, status, created_at, etc.) 
+task_storage: + # Storage type: rocksdb (persistent) + # Default: rocksdb + storage_type: rocksdb + + # Database path (optional) + # If null or not specified, uses default path: data/task/{task_name} + # Example: if task name is "my_task", default path is data/task/my_task + # If a path is specified, uses the specified path + db_path: null + + # RocksDB configuration + rocksdb: + # Maximum number of open files (optional) + max_open_files: 1000 + + # Write buffer size in bytes (optional) + write_buffer_size: 67108864 + + # Maximum number of write buffers (optional) + max_write_buffer_number: 3 + + # Target file size base in bytes (optional) + target_file_size_base: 67108864 + + # Maximum bytes for level base in bytes (optional) + max_bytes_for_level_base: 268435456 diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 00000000..cad26e5c --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,62 @@ +x-logging: &default-logging + options: + max-size: "12m" + max-file: "5" + driver: json-file + +services: + function-stream: + build: + context: . 
+ dockerfile: Dockerfile + image: function-stream:latest + container_name: function-stream + hostname: function-stream + ports: + - "8080:8080" + volumes: + - function-stream-data:/app/data + - function-stream-logs:/app/logs + environment: + - FUNCTION_STREAM_CONFIG=/app/conf/config.yaml + logging: *default-logging + networks: + - spnet + depends_on: + - kafka + + kafka: + image: apache/kafka + hostname: kafka + ports: + - "9092:9092" + environment: + - KAFKA_CFG_NODE_ID=0 + - KAFKA_CFG_PROCESS_ROLES=controller,broker + - KAFKA_CFG_CONTROLLER_QUORUM_VOTERS=0@kafka:9093 + - KAFKA_CFG_LISTENERS=PLAINTEXT://:9092,CONTROLLER://:9093,OUTSIDE://:9094 + - KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://kafka:9092,OUTSIDE://localhost:9094 + - KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,OUTSIDE:PLAINTEXT + - KAFKA_CFG_INTER_BROKER_LISTENER_NAME=PLAINTEXT + - KAFKA_CFG_MESSAGE_MAX_BYTES=5000012 + - KAFKA_CFG_FETCH_MESSAGE_MAX_BYTES=5000012 + - KAFKA_CFG_REPLICA_FETCH_MAX_BYTES=10000000 + - KAFKA_CFG_CONTROLLER_LISTENER_NAMES=CONTROLLER + volumes: + - kafka3:/bitnami + logging: *default-logging + networks: + - spnet + +volumes: + kafka3: + function-stream-data: + function-stream-logs: + +networks: + spnet: + name: spnet + driver: bridge + ipam: + config: + - subnet: 172.31.0.0/16 diff --git a/docs/images/arch.png b/docs/images/arch.png deleted file mode 100644 index 5e2d07d2..00000000 Binary files a/docs/images/arch.png and /dev/null differ diff --git a/examples/basic/main.go b/examples/basic/main.go deleted file mode 100644 index 38755536..00000000 --- a/examples/basic/main.go +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package main - -import ( - "github.com/functionstream/function-stream/clients/gofs" - "log/slog" -) - -func main() { - slog.Info("Hello from Go function!") - err := gofs.NewFSClient(). - Register(gofs.DefaultModule, gofs.WithFunction(gofs.NewSimpleFunction(myProcess))). - Run() - if err != nil { - slog.Error(err.Error()) - } -} - -type Person struct { - Name string `json:"name"` - Money int `json:"money"` - Expected int `json:"expected"` -} - -func myProcess(ctx gofs.FunctionContext, e gofs.Event[Person]) (gofs.Event[Person], error) { - person := e.Data() - person.Money += 1 - return gofs.NewEvent(person), nil -} diff --git a/examples/examples-validator/Cargo.lock b/examples/examples-validator/Cargo.lock new file mode 100644 index 00000000..daead670 --- /dev/null +++ b/examples/examples-validator/Cargo.lock @@ -0,0 +1,829 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 4 + +[[package]] +name = "anstream" +version = "0.6.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43d5b281e737544384e969a5ccad3f1cdd24b48086a0fc1b2a5262a26b8f4f4a" +dependencies = [ + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "is_terminal_polyfill", + "utf8parse", +] + +[[package]] +name = "anstyle" +version = "1.0.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5192cca8006f1fd4f7237516f40fa183bb07f8fbdfedaa0036de5ea9b0b45e78" + +[[package]] +name = "anstyle-parse" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e7644824f0aa2c7b9384579234ef10eb7efb6a0deb83f9630a49594dd9c15c2" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" +version = "1.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "40c48f72fd53cd289104fc64099abca73db4166ad86ea0b4341abe65af83dadc" +dependencies = [ + "windows-sys 0.61.2", +] + +[[package]] +name = "anstyle-wincon" +version = "3.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "291e6a250ff86cd4a820112fb8898808a366d8f9f58ce16d1f538353ad55747d" +dependencies = [ + "anstyle", + "once_cell_polyfill", + "windows-sys 0.61.2", +] + +[[package]] +name = "anyhow" +version = "1.0.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61" + +[[package]] +name = "bitflags" +version = "2.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3" + +[[package]] +name = "bytes" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b35204fbdc0b3f4446b89fc1ac2cf84a8a68971995d0bf2e925ec7cd960f9cb3" + +[[package]] +name = "cc" +version = "1.2.51" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a0aeaff4ff1a90589618835a598e545176939b97874f7abc7851caa0618f203" +dependencies = [ + "find-msvc-tools", + "shlex", +] + +[[package]] +name = "cfg-if" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" + +[[package]] +name = "clap" +version = "4.5.54" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6e6ff9dcd79cff5cd969a17a545d79e84ab086e444102a591e288a8aa3ce394" +dependencies = [ + "clap_builder", + "clap_derive", +] + +[[package]] +name = "clap_builder" +version = "4.5.54" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa42cf4d2b7a41bc8f663a7cab4031ebafa1bf3875705bfaf8466dc60ab52c00" +dependencies = [ + "anstream", + "anstyle", + "clap_lex", + "strsim", +] + +[[package]] +name = "clap_derive" +version = "4.5.49" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a0b5487afeab2deb2ff4e03a807ad1a03ac532ff5a2cee5d86884440c7f7671" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "clap_lex" +version = "0.7.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3e64b0cc0439b12df2fa678eae89a1c56a529fd067a9115f7827f1fffd22b32" + +[[package]] +name = "cmake" +version = "0.1.57" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75443c44cd6b379beb8c5b45d85d0773baf31cce901fe7bb252f4eff3008ef7d" +dependencies = [ + "cc", +] + +[[package]] +name = "colorchoice" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" + +[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + +[[package]] +name = "errno" +version = "0.3.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" +dependencies = [ + "libc", + "windows-sys 0.61.2", +] + +[[package]] +name = "examples-validator" +version = "0.1.0" +dependencies = [ + "anyhow", + "clap", + "rdkafka", + "serde", + "serde_json", + "tokio", + "tracing", + "tracing-subscriber", +] + +[[package]] +name = "find-msvc-tools" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "645cbb3a84e60b7531617d5ae4e57f7e27308f6445f5abf653209ea76dec8dff" + +[[package]] +name = "futures-channel" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" +dependencies = [ + "futures-core", +] + +[[package]] +name = "futures-core" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" + +[[package]] +name = "futures-task" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" + +[[package]] +name = "futures-util" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" +dependencies = [ + "futures-core", + "futures-task", + "pin-project-lite", + "pin-utils", +] + +[[package]] +name = "hashbrown" +version = "0.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" + +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + +[[package]] +name = "indexmap" +version = "2.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ad4bb2b565bca0645f4d68c5c9af97fba094e9791da685bf83cb5f3ce74acf2" +dependencies = [ + "equivalent", + "hashbrown", +] + +[[package]] +name = "is_terminal_polyfill" +version = "1.70.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6cb138bb79a146c1bd460005623e142ef0181e3d0219cb493e02f7d08a35695" + +[[package]] +name = "itoa" +version = "1.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2" + +[[package]] +name = "lazy_static" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" + +[[package]] +name = "libc" +version = "0.2.178" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37c93d8daa9d8a012fd8ab92f088405fb202ea0b6ab73ee2482ae66af4f42091" + +[[package]] +name = "libz-sys" +version = "1.1.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15d118bbf3771060e7311cc7bb0545b01d08a8b4a7de949198dec1fa0ca1c0f7" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "lock_api" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "224399e74b87b5f3557511d98dff8b14089b3dadafcab6bb93eab67d3aace965" +dependencies = [ + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" + +[[package]] +name = "memchr" +version = "2.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" + +[[package]] +name = "mio" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a69bcab0ad47271a0234d9422b131806bf3968021e5dc9328caf2d4cd58557fc" +dependencies = [ + "libc", + "wasi", + "windows-sys 0.61.2", +] + +[[package]] +name = "nu-ansi-term" +version = "0.50.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" +dependencies = [ + "windows-sys 0.61.2", +] + +[[package]] +name = "num_enum" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1207a7e20ad57b847bbddc6776b968420d38292bbfe2089accff5e19e82454c" +dependencies = [ + "num_enum_derive", + "rustversion", +] + +[[package]] +name = "num_enum_derive" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff32365de1b6743cb203b710788263c44a03de03802daf96092f2da4fe6ba4d7" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "once_cell" +version = "1.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" + +[[package]] +name = "once_cell_polyfill" +version = "1.70.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "384b8ab6d37215f3c5301a95a4accb5d64aa607f1fcb26a11b5303878451b4fe" + +[[package]] +name = "parking_lot" +version = "0.12.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93857453250e3077bd71ff98b6a65ea6621a19bb0f559a85248955ac12c45a1a" +dependencies = [ + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2621685985a2ebf1c516881c026032ac7deafcda1a2c9b7850dc81e3dfcb64c1" 
+dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "smallvec", + "windows-link", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "pkg-config" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" + +[[package]] +name = "proc-macro-crate" +version = "3.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983" +dependencies = [ + "toml_edit", +] + +[[package]] +name = "proc-macro2" +version = "1.0.104" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9695f8df41bb4f3d222c95a67532365f569318332d03d5f3f67f37b20e6ebdf0" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a338cc41d27e6cc6dce6cefc13a0729dfbb81c262b1f519331575dd80ef3067f" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rdkafka" +version = "0.38.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f1856d72dbbbea0d2a5b2eaf6af7fb3847ef2746e883b11781446a51dbc85c0" +dependencies = [ + "futures-channel", + "futures-util", + "libc", + "log", + "rdkafka-sys", + "serde", + "serde_derive", + "serde_json", + "slab", + "tokio", +] + +[[package]] +name = "rdkafka-sys" +version = "4.9.0+2.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5230dca48bc354d718269f3e4353280e188b610f7af7e2fcf54b7a79d5802872" 
+dependencies = [ + "cmake", + "libc", + "libz-sys", + "num_enum", + "pkg-config", +] + +[[package]] +name = "redox_syscall" +version = "0.5.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" +dependencies = [ + "bitflags", +] + +[[package]] +name = "rustversion" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" + +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + +[[package]] +name = "serde" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" +dependencies = [ + "serde_core", + "serde_derive", +] + +[[package]] +name = "serde_core" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.148" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3084b546a1dd6289475996f182a22aba973866ea8e8b02c51d9f46b1336a22da" +dependencies = [ + "itoa", + "memchr", + "serde", + "serde_core", + "zmij", +] + +[[package]] +name = "sharded-slab" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +dependencies = [ + 
"lazy_static", +] + +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "signal-hook-registry" +version = "1.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4db69cba1110affc0e9f7bcd48bbf87b3f4fc7c61fc9155afd4c469eb3d6c1b" +dependencies = [ + "errno", + "libc", +] + +[[package]] +name = "slab" +version = "0.4.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" + +[[package]] +name = "smallvec" +version = "1.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" + +[[package]] +name = "socket2" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17129e116933cf371d018bb80ae557e889637989d8638274fb25622827b03881" +dependencies = [ + "libc", + "windows-sys 0.60.2", +] + +[[package]] +name = "strsim" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" + +[[package]] +name = "syn" +version = "2.0.112" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21f182278bf2d2bcb3c88b1b08a37df029d71ce3d3ae26168e3c653b213b99d4" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "thread_local" +version = "1.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "tokio" +version = "1.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ff360e02eab121e0bc37a2d3b4d4dc622e6eda3a8e5253d5435ecf5bd4c68408" +dependencies = [ + "bytes", + "libc", + "mio", + "parking_lot", + "pin-project-lite", + "signal-hook-registry", + "socket2", + "tokio-macros", + "windows-sys 0.61.2", +] + +[[package]] +name = "tokio-macros" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "toml_datetime" +version = "0.7.5+spec-1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92e1cfed4a3038bc5a127e35a2d360f145e1f4b971b551a2ba5fd7aedf7e1347" +dependencies = [ + "serde_core", +] + +[[package]] +name = "toml_edit" +version = "0.23.10+spec-1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84c8b9f757e028cee9fa244aea147aab2a9ec09d5325a9b01e0a49730c2b5269" +dependencies = [ + "indexmap", + "toml_datetime", + "toml_parser", + "winnow", +] + +[[package]] +name = "toml_parser" +version = "1.0.6+spec-1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3198b4b0a8e11f09dd03e133c0280504d0801269e9afa46362ffde1cbeebf44" +dependencies = [ + "winnow", +] + +[[package]] +name = "tracing" +version = "0.1.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "63e71662fa4b2a2c3a26f570f037eb95bb1f85397f3cd8076caed2f026a6d100" +dependencies = [ + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tracing-core" +version = "0.1.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"db97caf9d906fbde555dd62fa95ddba9eecfd14cb388e4f491a66d74cd5fb79a" +dependencies = [ + "once_cell", + "valuable", +] + +[[package]] +name = "tracing-log" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +dependencies = [ + "log", + "once_cell", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f30143827ddab0d256fd843b7a66d164e9f271cfa0dde49142c5ca0ca291f1e" +dependencies = [ + "nu-ansi-term", + "sharded-slab", + "smallvec", + "thread_local", + "tracing-core", + "tracing-log", +] + +[[package]] +name = "unicode-ident" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5" + +[[package]] +name = "utf8parse" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" + +[[package]] +name = "valuable" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" + +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + +[[package]] +name = "wasi" +version = "0.11.1+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" + +[[package]] +name = "windows-link" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" + +[[package]] +name = "windows-sys" +version = "0.60.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-sys" +version = "0.61.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-targets" +version = "0.53.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3" +dependencies = [ + "windows-link", + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_gnullvm", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" + +[[package]] +name = "windows_i686_gnu" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "960e6da069d81e09becb0ca57a65220ddff016ff2d6af6a223cf372a506593a3" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" + +[[package]] +name = "windows_i686_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" + +[[package]] +name = "winnow" +version = "0.7.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a5364e9d77fcdeeaa6062ced926ee3381faa2ee02d3eb83a5c27a8825540829" +dependencies = [ + "memchr", +] + +[[package]] +name = "zmij" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de9211a9f64b825911bdf0240f58b7a8dac217fe260fc61f080a07f61372fbd5" diff --git a/examples/examples-validator/Cargo.toml b/examples/examples-validator/Cargo.toml new file mode 100644 index 00000000..de87dbb3 --- /dev/null +++ b/examples/examples-validator/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "examples-validator" +version = "0.1.0" +edition = "2021" + +[workspace] + +[[bin]] +name = "kafka_test" +path = "kafka_test.rs" + +[dependencies] +tokio = { version = "1.0", features = ["full"] } +rdkafka = { version = "0.38", features = ["cmake-build"] } +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" +anyhow = "1.0" +clap = { version = "4.5", features = ["derive", "env"] } +tracing = "0.1" +tracing-subscriber = "0.3" + diff --git a/examples/examples-validator/kafka_test.rs b/examples/examples-validator/kafka_test.rs new file mode 100644 index 00000000..10ce1c40 --- /dev/null +++ b/examples/examples-validator/kafka_test.rs @@ -0,0 +1,267 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in 
compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use anyhow::{Context, Result}; +use clap::Parser; +use rdkafka::config::ClientConfig; +use rdkafka::consumer::{Consumer, StreamConsumer}; +use rdkafka::message::{BorrowedMessage, Message}; +use rdkafka::producer::{FutureProducer, FutureRecord}; +use rdkafka::util::Timeout; +use serde::Deserialize; +use std::collections::HashMap; +use std::sync::Arc; +use std::time::Duration; +use tokio::signal; +use tokio::sync::Notify; +use tracing::{error, info, warn}; +#[derive(Parser, Debug, Clone)] +#[command(author, version, about, long_about = None)] +struct AppConfig { + /// Kafka Broker List (e.g., "127.0.0.1:9092") + #[arg(long, default_value = "127.0.0.1:9092", env = "KAFKA_BROKERS")] + brokers: String, + + /// Input Topic (Producer Target) + #[arg(long, default_value = "input-topic", env = "INPUT_TOPIC")] + input_topic: String, + + /// Output Topic (Consumer Source) + #[arg(long, default_value = "output-topic", env = "OUTPUT_TOPIC")] + output_topic: String, + + /// Consumer Group ID + #[arg(long, default_value = "industrial-verifier-v1", env = "GROUP_ID")] + group_id: String, + + /// Total number of messages to produce for the test + #[arg(long, default_value_t = 10000)] + msg_count: usize, +} + +/// Represents the expected JSON structure from the processor. 
+/// Example: {"total_processed": 100, "counter_map": {"apple": 50, "banana": 50}} +#[derive(Debug, Deserialize)] +struct ProcessingResult { + total_processed: u64, + counter_map: HashMap, +} + +impl ProcessingResult { + /// Business Logic Validation: + /// Checks if the sum of all counters equals the declared total. + fn validate_consistency(&self) -> bool { + let calculated_sum: u64 = self.counter_map.values().sum(); + calculated_sum == self.total_processed + } +} + +// ============================================================================ +// 2. Main Entry Point +// ============================================================================ + +#[tokio::main] +async fn main() -> Result<()> { + // Initialize structured logging (Info level by default) + tracing_subscriber::fmt() + .with_target(false) + .with_thread_ids(true) + .with_level(true) + .init(); + + // Parse configuration + let config = AppConfig::parse(); + info!("Starting Kafka Driver with config: {:?}", config); + + // Create a notification channel for graceful shutdown + let shutdown_notify = Arc::new(Notify::new()); + let shutdown_consumer = shutdown_notify.clone(); + + // Spawn Producer and Consumer as concurrent async tasks + let producer_handle = tokio::spawn(run_producer(config.clone())); + let consumer_handle = tokio::spawn(run_consumer(config.clone(), shutdown_consumer)); + + // Wait for system signals (Ctrl+C / SIGTERM) + match signal::ctrl_c().await { + Ok(()) => { + info!("Shutdown signal received. Stopping services gracefully..."); + // Notify the consumer loop to break + shutdown_notify.notify_waiters(); + } + Err(err) => { + error!("Unable to listen for shutdown signal: {}", err); + } + } + + // Wait for tasks to finish (cleanup) + let _ = tokio::join!(producer_handle, consumer_handle); + + info!("Application execution completed."); + Ok(()) +} + +// ============================================================================ +// 3. 
Producer Logic +// ============================================================================ + +async fn run_producer(config: AppConfig) -> Result<()> { + info!("Initializing Producer targeting topic: {}", config.input_topic); + + let producer: FutureProducer = ClientConfig::new() + .set("bootstrap.servers", &config.brokers) + .set("message.timeout.ms", "5000") + // Enable idempotence for exactly-once semantics + .set("enable.idempotence", "true") + .create() + .context("Failed to create Kafka Producer")?; + + let test_data = vec!["apple", "banana", "cherry", "date", "elderberry"]; + + // Simulate data stream + for i in 0..config.msg_count { + let data = test_data[i % test_data.len()]; + let key = format!("key-{}", i); + + // Construct the record + let record = FutureRecord::to(&config.input_topic) + .key(&key) + .payload(data); + + // Send asynchronously with a timeout + let delivery_status = producer + .send(record, Timeout::After(Duration::from_secs(5))) + .await; + + match delivery_status { + Ok(delivery) => { + // Log progress every 1000 messages to reduce noise + if (i + 1) % 1000 == 0 { + info!( + "✓ Produced [{}/{}] -> Partition: {}, Offset: {}", + i + 1, config.msg_count, delivery.partition, delivery.offset + ); + } + } + Err((e, _)) => error!("✗ Failed to send message #{}: {:?}", i, e), + } + + // Slight delay to simulate real-world throughput (prevents local buffer overflow) + tokio::time::sleep(Duration::from_millis(1)).await; + } + + info!("Producer finished. Total messages sent: {}", config.msg_count); + Ok(()) +} + +// ============================================================================ +// 4. 
Consumer Logic +// ============================================================================ + +async fn run_consumer(config: AppConfig, shutdown: Arc) -> Result<()> { + info!("Initializing Consumer listening on topic: {}", config.output_topic); + + let consumer: StreamConsumer = ClientConfig::new() + .set("group.id", &config.group_id) + .set("bootstrap.servers", &config.brokers) + .set("enable.partition.eof", "false") + .set("session.timeout.ms", "6000") + // Auto-commit is enabled, but strictly robust apps might manage offsets manually + .set("enable.auto.commit", "true") + .set("auto.offset.reset", "earliest") + .create() + .context("Failed to create Kafka Consumer")?; + + consumer + .subscribe(&[&config.output_topic]) + .context("Failed to subscribe to topic")?; + + info!("Consumer started. Waiting for messages..."); + + loop { + tokio::select! { + // Branch 1: Receive message from Kafka + recv_result = consumer.recv() => { + match recv_result { + Ok(m) => { + // Process the message (Validation & Logging) + if let Err(e) = process_message(&m) { + error!("Business logic error while processing message: {:?}", e); + } + }, + Err(e) => warn!("Kafka error during consumption: {}", e), + } + } + // Branch 2: Handle graceful shutdown signal + _ = shutdown.notified() => { + info!("Consumer received shutdown signal. Exiting loop."); + break; + } + } + } + + Ok(()) +} + +// ============================================================================ +// 5. Business Logic & Validation +// ============================================================================ + +/// Parses the payload, validates business rules, and logs the result. +fn process_message(message: &BorrowedMessage) -> Result<()> { + // 1. Extract payload as string + let payload = match message.payload_view::() { + None => return Ok(()), // Ignore empty messages + Some(Ok(s)) => s, + Some(Err(e)) => { + warn!("Payload encoding error (not UTF-8): {:?}", e); + return Ok(()); + } + }; + + // 2. 
Strong Typing: Parse JSON into struct + match serde_json::from_str::(payload) { + Ok(result) => { + // 3. Data Integrity Check + if result.validate_consistency() { + info!( + topic = message.topic(), + offset = message.offset(), + total = result.total_processed, + details = ?result.counter_map, + "✓ Data Validation Passed" + ); + } else { + // Calculation error detection + let actual_sum: u64 = result.counter_map.values().sum(); + error!( + topic = message.topic(), + offset = message.offset(), + expected = result.total_processed, + actual = actual_sum, + "✗ Data Validation FAILED: Sum mismatch" + ); + } + }, + Err(e) => { + // Handling malformed JSON or schema mismatch + error!( + topic = message.topic(), + offset = message.offset(), + raw_payload = payload, + error = ?e, + "Failed to deserialize JSON payload" + ); + } + } + + Ok(()) +} \ No newline at end of file diff --git a/examples/go-processor/Cargo.lock b/examples/go-processor/Cargo.lock new file mode 100644 index 00000000..19fd22c7 --- /dev/null +++ b/examples/go-processor/Cargo.lock @@ -0,0 +1,590 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 4 + +[[package]] +name = "bitflags" +version = "2.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3" + +[[package]] +name = "bytes" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b35204fbdc0b3f4446b89fc1ac2cf84a8a68971995d0bf2e925ec7cd960f9cb3" + +[[package]] +name = "cc" +version = "1.2.51" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a0aeaff4ff1a90589618835a598e545176939b97874f7abc7851caa0618f203" +dependencies = [ + "find-msvc-tools", + "shlex", +] + +[[package]] +name = "cfg-if" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" + +[[package]] +name = "cmake" +version = "0.1.57" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75443c44cd6b379beb8c5b45d85d0773baf31cce901fe7bb252f4eff3008ef7d" +dependencies = [ + "cc", +] + +[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + +[[package]] +name = "errno" +version = "0.3.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" +dependencies = [ + "libc", + "windows-sys 0.61.2", +] + +[[package]] +name = "find-msvc-tools" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "645cbb3a84e60b7531617d5ae4e57f7e27308f6445f5abf653209ea76dec8dff" + +[[package]] +name = "futures-channel" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" +dependencies = [ + "futures-core", +] + +[[package]] +name = 
"futures-core" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" + +[[package]] +name = "futures-task" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" + +[[package]] +name = "futures-util" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" +dependencies = [ + "futures-core", + "futures-task", + "pin-project-lite", + "pin-utils", +] + +[[package]] +name = "go-processor-test" +version = "0.1.0" +dependencies = [ + "rdkafka", + "serde_json", + "tokio", +] + +[[package]] +name = "hashbrown" +version = "0.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" + +[[package]] +name = "indexmap" +version = "2.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ad4bb2b565bca0645f4d68c5c9af97fba094e9791da685bf83cb5f3ce74acf2" +dependencies = [ + "equivalent", + "hashbrown", +] + +[[package]] +name = "itoa" +version = "1.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2" + +[[package]] +name = "libc" +version = "0.2.178" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37c93d8daa9d8a012fd8ab92f088405fb202ea0b6ab73ee2482ae66af4f42091" + +[[package]] +name = "libz-sys" +version = "1.1.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15d118bbf3771060e7311cc7bb0545b01d08a8b4a7de949198dec1fa0ca1c0f7" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "lock_api" +version = "0.4.14" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "224399e74b87b5f3557511d98dff8b14089b3dadafcab6bb93eab67d3aace965" +dependencies = [ + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" + +[[package]] +name = "memchr" +version = "2.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" + +[[package]] +name = "mio" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a69bcab0ad47271a0234d9422b131806bf3968021e5dc9328caf2d4cd58557fc" +dependencies = [ + "libc", + "wasi", + "windows-sys 0.61.2", +] + +[[package]] +name = "num_enum" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1207a7e20ad57b847bbddc6776b968420d38292bbfe2089accff5e19e82454c" +dependencies = [ + "num_enum_derive", + "rustversion", +] + +[[package]] +name = "num_enum_derive" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff32365de1b6743cb203b710788263c44a03de03802daf96092f2da4fe6ba4d7" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "parking_lot" +version = "0.12.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93857453250e3077bd71ff98b6a65ea6621a19bb0f559a85248955ac12c45a1a" +dependencies = [ + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2621685985a2ebf1c516881c026032ac7deafcda1a2c9b7850dc81e3dfcb64c1" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "smallvec", + "windows-link", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.16" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "pkg-config" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" + +[[package]] +name = "proc-macro-crate" +version = "3.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983" +dependencies = [ + "toml_edit", +] + +[[package]] +name = "proc-macro2" +version = "1.0.104" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9695f8df41bb4f3d222c95a67532365f569318332d03d5f3f67f37b20e6ebdf0" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a338cc41d27e6cc6dce6cefc13a0729dfbb81c262b1f519331575dd80ef3067f" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rdkafka" +version = "0.38.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f1856d72dbbbea0d2a5b2eaf6af7fb3847ef2746e883b11781446a51dbc85c0" +dependencies = [ + "futures-channel", + "futures-util", + "libc", + "log", + "rdkafka-sys", + "serde", + "serde_derive", + "serde_json", + "slab", + "tokio", +] + +[[package]] +name = "rdkafka-sys" +version = "4.9.0+2.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5230dca48bc354d718269f3e4353280e188b610f7af7e2fcf54b7a79d5802872" +dependencies = [ + "cmake", + "libc", + "libz-sys", + "num_enum", + "pkg-config", +] + +[[package]] +name = "redox_syscall" +version = "0.5.18" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" +dependencies = [ + "bitflags", +] + +[[package]] +name = "rustversion" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" + +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + +[[package]] +name = "serde" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" +dependencies = [ + "serde_core", + "serde_derive", +] + +[[package]] +name = "serde_core" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.148" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3084b546a1dd6289475996f182a22aba973866ea8e8b02c51d9f46b1336a22da" +dependencies = [ + "itoa", + "memchr", + "serde", + "serde_core", + "zmij", +] + +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "signal-hook-registry" +version = "1.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"c4db69cba1110affc0e9f7bcd48bbf87b3f4fc7c61fc9155afd4c469eb3d6c1b" +dependencies = [ + "errno", + "libc", +] + +[[package]] +name = "slab" +version = "0.4.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" + +[[package]] +name = "smallvec" +version = "1.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" + +[[package]] +name = "socket2" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17129e116933cf371d018bb80ae557e889637989d8638274fb25622827b03881" +dependencies = [ + "libc", + "windows-sys 0.60.2", +] + +[[package]] +name = "syn" +version = "2.0.112" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21f182278bf2d2bcb3c88b1b08a37df029d71ce3d3ae26168e3c653b213b99d4" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "tokio" +version = "1.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff360e02eab121e0bc37a2d3b4d4dc622e6eda3a8e5253d5435ecf5bd4c68408" +dependencies = [ + "bytes", + "libc", + "mio", + "parking_lot", + "pin-project-lite", + "signal-hook-registry", + "socket2", + "tokio-macros", + "windows-sys 0.61.2", +] + +[[package]] +name = "tokio-macros" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "toml_datetime" +version = "0.7.5+spec-1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92e1cfed4a3038bc5a127e35a2d360f145e1f4b971b551a2ba5fd7aedf7e1347" +dependencies = [ + "serde_core", +] + +[[package]] +name = "toml_edit" +version = "0.23.10+spec-1.0.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "84c8b9f757e028cee9fa244aea147aab2a9ec09d5325a9b01e0a49730c2b5269" +dependencies = [ + "indexmap", + "toml_datetime", + "toml_parser", + "winnow", +] + +[[package]] +name = "toml_parser" +version = "1.0.6+spec-1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3198b4b0a8e11f09dd03e133c0280504d0801269e9afa46362ffde1cbeebf44" +dependencies = [ + "winnow", +] + +[[package]] +name = "unicode-ident" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5" + +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + +[[package]] +name = "wasi" +version = "0.11.1+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" + +[[package]] +name = "windows-link" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" + +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-sys" +version = "0.61.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-targets" +version = "0.53.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3" +dependencies = [ + 
"windows-link", + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_gnullvm", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" + +[[package]] +name = "windows_i686_gnu" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "960e6da069d81e09becb0ca57a65220ddff016ff2d6af6a223cf372a506593a3" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" + +[[package]] +name = "windows_i686_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" + +[[package]] +name = "winnow" +version = "0.7.14" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a5364e9d77fcdeeaa6062ced926ee3381faa2ee02d3eb83a5c27a8825540829" +dependencies = [ + "memchr", +] + +[[package]] +name = "zmij" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de9211a9f64b825911bdf0240f58b7a8dac217fe260fc61f080a07f61372fbd5" diff --git a/examples/go-processor/build.sh b/examples/go-processor/build.sh new file mode 100755 index 00000000..bf03058e --- /dev/null +++ b/examples/go-processor/build.sh @@ -0,0 +1,202 @@ +#!/bin/bash + +# Go wasm Processor build script +# Compiles Go code to wasm Component and generates YAML configuration file + +set -e + +# Color output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' # No Color + +# Script directory +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +cd "$SCRIPT_DIR" + +# Task name (from environment variable or use default value) +TASK_NAME="${TASK_NAME:-go-processor-example}" + +echo -e "${GREEN}Building Go WASM Processor Component...${NC}" + +# Check if TinyGo is installed +if ! command -v tinygo &> /dev/null; then + echo -e "${RED}Error: tinygo is not installed${NC}" + echo "Please install TinyGo: https://tinygo.org/getting-started/install/" + exit 1 +fi + +# Check TinyGo version +TINYGO_VERSION=$(tinygo version | awk '{print $2}' | sed 's/tinygo version //') +echo -e "${GREEN}Using TinyGo $TINYGO_VERSION${NC}" + +# Step 1: Generate WIT bindings +echo -e "${GREEN}Step 1: Generating WIT bindings...${NC}" +chmod +x generate-bindings.sh +./generate-bindings.sh + +if [ $? -ne 0 ]; then + echo -e "${RED}Failed to generate WIT bindings${NC}" + exit 1 +fi + +# Check if bindings were generated +if [ ! 
-d "bindings" ] || [ -z "$(ls -A bindings 2>/dev/null)" ]; then + echo -e "${YELLOW}Warning: No bindings generated, continuing with placeholder implementation${NC}" +fi + + # Output directory structure: + # build/ - Final output files + # build/tmp/ - Temporary intermediate files (deleted after build) + # build/deps/ - Dependency files (kept, checked each time) + OUTPUT_DIR="build" + TMP_DIR="$OUTPUT_DIR/tmp" + DEPS_DIR="$OUTPUT_DIR/deps" + mkdir -p "$OUTPUT_DIR" + mkdir -p "$TMP_DIR" + mkdir -p "$DEPS_DIR" + +# Step 2: Compile Go to wasm using TinyGo +echo -e "${GREEN}Step 2: Compiling Go to WASM (using TinyGo)...${NC}" + +PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)" +WIT_FILE="$PROJECT_ROOT/wit/processor.wit" +COMPONENT_FILE="$OUTPUT_DIR/processor.wasm" + +# Compile Go to wasm using TinyGo (core module) +echo " Compiling with TinyGo (target: wasi)..." +CORE_WASM="$TMP_DIR/processor-core.wasm" +tinygo build -target wasi -o "$CORE_WASM" main.go + +if [ $? -ne 0 ]; then + echo -e "${RED}Failed to compile Go to WASM${NC}" + exit 1 +fi + +# Verify generated file +if [ ! -f "$CORE_WASM" ]; then + echo -e "${RED}Error: Core WASM file was not created${NC}" + exit 1 +fi + +echo -e "${GREEN}✓ Go compiled to WASM (core module)${NC}" + +# Step 3: Convert to Component Model format +echo -e "${GREEN}Step 3: Converting to Component Model format...${NC}" + +# Check if wasm-tools is available (required) +if ! command -v wasm-tools &> /dev/null; then + echo -e "${RED}Error: wasm-tools is REQUIRED for Component Model conversion${NC}" + echo "Please install wasm-tools:" + echo " cargo install wasm-tools" + exit 1 +fi + +# 3.1: Embed WIT metadata +EMBEDDED_WASM="$TMP_DIR/processor-embedded.wasm" +echo " Embedding WIT metadata..." 
+wasm-tools component embed --world processor "$WIT_FILE" "$CORE_WASM" -o "$EMBEDDED_WASM" || { + echo -e "${RED}Failed to embed WIT metadata${NC}" + exit 1 +} + +# 3.2: Prepare WASI adapter (if needed) +WASI_ADAPTER_FILE="$DEPS_DIR/wasi_snapshot_preview1.reactor.wasm" +if [ ! -f "$WASI_ADAPTER_FILE" ]; then + echo " Downloading WASI adapter..." + ADAPTER_URLS=( + "https://github.com/bytecodealliance/wasmtime/releases/download/v24.0.0/wasi_snapshot_preview1.reactor.wasm" + "https://github.com/bytecodealliance/wasmtime/releases/download/v23.0.0/wasi_snapshot_preview1.reactor.wasm" + "https://github.com/bytecodealliance/wasmtime/releases/latest/download/wasi_snapshot_preview1.reactor.wasm" + ) + + DOWNLOADED=0 + for ADAPTER_URL in "${ADAPTER_URLS[@]}"; do + echo " Trying: $ADAPTER_URL" + if command -v curl &> /dev/null; then + if curl -L -f --progress-bar -o "$WASI_ADAPTER_FILE" "$ADAPTER_URL" 2>&1 && [ -s "$WASI_ADAPTER_FILE" ]; then + if [ "$(head -c 4 "$WASI_ADAPTER_FILE" | od -An -tx1 | tr -d ' \n')" = "0061736d" ]; then + DOWNLOADED=1 + echo " ✓ Successfully downloaded from: $ADAPTER_URL" + break + else + rm -f "$WASI_ADAPTER_FILE" + fi + fi + elif command -v wget &> /dev/null; then + if wget --progress=bar:force -O "$WASI_ADAPTER_FILE" "$ADAPTER_URL" 2>&1 && [ -s "$WASI_ADAPTER_FILE" ]; then + if [ "$(head -c 4 "$WASI_ADAPTER_FILE" | od -An -tx1 | tr -d ' \n')" = "0061736d" ]; then + DOWNLOADED=1 + echo " ✓ Successfully downloaded from: $ADAPTER_URL" + break + else + rm -f "$WASI_ADAPTER_FILE" + fi + fi + fi + done + + if [ $DOWNLOADED -eq 0 ]; then + echo -e "${RED}Failed to download WASI adapter${NC}" + exit 1 + fi + echo " ✓ WASI adapter downloaded and verified" +fi + +# 3.3: Convert to Component Model (final output) +echo " Converting to Component Model..." 
+# Use --realloc-via-memory-grow option because TinyGo-generated wasm lacks cabi_realloc export +wasm-tools component new "$EMBEDDED_WASM" \ + --adapt wasi_snapshot_preview1="$WASI_ADAPTER_FILE" \ + --realloc-via-memory-grow \ + -o "$COMPONENT_FILE" || { + echo -e "${RED}Failed to convert to Component Model${NC}" + exit 1 +} + +# Validate Component Model format (silence both stdout and stderr; stderr must be redirected after stdout) +if wasm-tools validate "$COMPONENT_FILE" > /dev/null 2>&1; then + echo -e "${GREEN}✓ Component Model format validated${NC}" +else + echo -e "${YELLOW}Warning: Component validation failed${NC}" +fi + +# Verify output file exists +if [ ! -f "$COMPONENT_FILE" ]; then + echo -e "${RED}Error: Output file was not created: $COMPONENT_FILE${NC}" + exit 1 +fi + +echo -e "${GREEN}✓ WASM Component built: $COMPONENT_FILE${NC}" + +# Display file information +if [ -f "$COMPONENT_FILE" ]; then + WASM_SIZE=$(du -h "$COMPONENT_FILE" | cut -f1) + echo -e "${GREEN}WASM module size: $WASM_SIZE${NC}" +else + echo -e "${RED}Error: Output file does not exist: $COMPONENT_FILE${NC}" + exit 1 +fi + +# Cleanup notes +echo "" +echo -e "${GREEN}Build completed!${NC}" +echo "" +echo -e "${GREEN}Directory structure:${NC}" +echo " $OUTPUT_DIR/" +echo " ├── processor.wasm # Final output file (Component Model format)" +echo " └── deps/" +echo " └── wasi_snapshot_preview1.reactor.wasm # WASI adapter (dependency file, kept)" +echo "" +echo -e "${YELLOW}Notes:${NC}" +echo " - Files in bindings/ directory are intermediate artifacts (Go binding code) used for compilation" +echo " - Intermediate files in build/tmp/ directory are kept and can be safely deleted" +echo " - Dependency files in build/deps/ directory are kept and checked for existence on next build" +echo " - Final output: $OUTPUT_DIR/processor.wasm (Component Model format)" +echo "" +echo -e "${YELLOW}Next steps:${NC}" +echo "1. Use the generated config.yaml to register the task" +echo "2. 
The WASM module is located at: $OUTPUT_DIR/processor.wasm" +echo -e "${GREEN}The WASM file is now in Component Model format and ready to use!${NC}" + diff --git a/examples/go-processor/config.yaml b/examples/go-processor/config.yaml new file mode 100644 index 00000000..1c49e6ef --- /dev/null +++ b/examples/go-processor/config.yaml @@ -0,0 +1,15 @@ +name: "go-processor-example" +type: processor +input-groups: + - inputs: + - input-type: kafka + bootstrap_servers: "localhost:9092" + topic: "input-topic" + partition: 0 + group_id: "go-processor-group" +outputs: + - output-type: kafka + bootstrap_servers: "localhost:9092" + topic: "output-topic" + partition: 0 + diff --git a/examples/go-processor/generate-bindings.sh b/examples/go-processor/generate-bindings.sh new file mode 100755 index 00000000..5df96184 --- /dev/null +++ b/examples/go-processor/generate-bindings.sh @@ -0,0 +1,69 @@ +#!/bin/bash + +set -e + +echo "Generating Go bindings..." + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)" +WIT_FILE="$PROJECT_ROOT/wit/processor.wit" +OUT_DIR="$SCRIPT_DIR/bindings" + +if [ ! -f "$WIT_FILE" ]; then + echo "Error: WIT file not found: $WIT_FILE" + exit 1 +fi + +# Check wit-bindgen-go +WIT_BINDGEN_GO="" +if command -v wit-bindgen-go &> /dev/null; then + WIT_BINDGEN_GO="wit-bindgen-go" +elif [ -f "$HOME/go/bin/wit-bindgen-go" ]; then + WIT_BINDGEN_GO="$HOME/go/bin/wit-bindgen-go" +elif [ -n "$GOPATH" ] && [ -f "$GOPATH/bin/wit-bindgen-go" ]; then + WIT_BINDGEN_GO="$GOPATH/bin/wit-bindgen-go" +elif [ -n "$GOBIN" ] && [ -f "$GOBIN/wit-bindgen-go" ]; then + WIT_BINDGEN_GO="$GOBIN/wit-bindgen-go" +fi + +if [ -z "$WIT_BINDGEN_GO" ]; then + echo "Installing wit-bindgen-go..." 
+ go install go.bytecodealliance.org/cmd/wit-bindgen-go@latest + if [ -f "$HOME/go/bin/wit-bindgen-go" ]; then + WIT_BINDGEN_GO="$HOME/go/bin/wit-bindgen-go" + elif [ -n "$GOPATH" ] && [ -f "$GOPATH/bin/wit-bindgen-go" ]; then + WIT_BINDGEN_GO="$GOPATH/bin/wit-bindgen-go" + elif [ -n "$GOBIN" ] && [ -f "$GOBIN/wit-bindgen-go" ]; then + WIT_BINDGEN_GO="$GOBIN/wit-bindgen-go" + else + echo "Error: Unable to find wit-bindgen-go" + exit 1 + fi +fi + +# Create output directory +mkdir -p "$OUT_DIR" + +# Generate bindings +echo "Running wit-bindgen-go generate..." +echo "WIT file: $WIT_FILE" +echo "Output directory: $OUT_DIR" +echo "World: processor" + +"$WIT_BINDGEN_GO" generate "$WIT_FILE" --world="processor" --out="$OUT_DIR" + +if [ $? -eq 0 ]; then + echo "✓ Bindings generated to $OUT_DIR/ directory" + echo "" + echo "Notes:" + echo " - These binding files are intermediate artifacts used for compiling Go code" + echo " - Only one WASM file will be generated: build/processor.wasm" + echo " - The number of binding files depends on the number of interfaces defined in the WIT file (kv, collector, processor, etc.)" + echo "" + echo "Note: If you encounter Go 1.24 wasmimport/wasmexport compatibility issues," + echo " please check if wit-bindgen-go has an updated version with support, or consider using other tools." 
+else + echo "✗ Failed to generate bindings" + exit 1 +fi + diff --git a/examples/go-processor/go.mod b/examples/go-processor/go.mod new file mode 100644 index 00000000..29367846 --- /dev/null +++ b/examples/go-processor/go.mod @@ -0,0 +1,7 @@ +module github.com/function-stream/go-processor-example + +go 1.24 + +require ( + go.bytecodealliance.org/cm v0.3.0 +) diff --git a/examples/go-processor/go.sum b/examples/go-processor/go.sum new file mode 100644 index 00000000..cf6e1ee0 --- /dev/null +++ b/examples/go-processor/go.sum @@ -0,0 +1,2 @@ +go.bytecodealliance.org/cm v0.3.0 h1:VhV+4vjZPUGCozCg9+up+FNL3YU6XR+XKghk7kQ0vFc= +go.bytecodealliance.org/cm v0.3.0/go.mod h1:JD5vtVNZv7sBoQQkvBvAAVKJPhR/bqBH7yYXTItMfZI= diff --git a/examples/go-processor/main.go b/examples/go-processor/main.go new file mode 100644 index 00000000..ea499285 --- /dev/null +++ b/examples/go-processor/main.go @@ -0,0 +1,160 @@ +//go:build wasi || wasm + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package main + +import ( + "encoding/json" + "strconv" + "strings" + + "go.bytecodealliance.org/cm" + + "github.com/function-stream/go-processor-example/bindings/functionstream/core/collector" + "github.com/function-stream/go-processor-example/bindings/functionstream/core/kv" + "github.com/function-stream/go-processor-example/bindings/functionstream/core/processor" +) + +var store kv.Store +var counterMap map[string]int64 +var totalProcessed int64 +var keyPrefix string + +func init() { + counterMap = make(map[string]int64) + totalProcessed = 0 + keyPrefix = "" + + processor.Exports.FsInit = FsInit + processor.Exports.FsProcess = FsProcess + processor.Exports.FsProcessWatermark = FsProcessWatermark + processor.Exports.FsTakeCheckpoint = FsTakeCheckpoint + processor.Exports.FsCheckHeartbeat = FsCheckHeartbeat + processor.Exports.FsClose = FsClose + processor.Exports.FsExec = FsExec + processor.Exports.FsCustom = FsCustom +} + +// WIT: export fs-init: func(config: list>); +func FsInit(config cm.List[[2]string]) { + counterMap = make(map[string]int64) + totalProcessed = 0 + keyPrefix = "" + + configSlice := config.Slice() + for _, entry := range configSlice { + if entry[0] == "key_prefix" { + keyPrefix = entry[1] + } + } + + store = kv.NewStore("counter-store") +} + +// WIT: export fs-process: func(source-id: u32, data: list); +func FsProcess(sourceID uint32, data cm.List[uint8]) { + dataBytes := data.Slice() + inputStr := strings.TrimSpace(string(dataBytes)) + if inputStr == "" { + return + } + + totalProcessed++ + + fullKey := keyPrefix + inputStr + storeKeyBytes := []byte(fullKey) + key := cm.ToList(storeKeyBytes) + result := store.GetState(key) + + currentCount := int64(0) + if result.IsOK() { + opt := result.OK() + if opt != nil && !opt.None() { + valueList := opt.Some() + if valueList != nil { + valueBytes := valueList.Slice() + if count, err := strconv.ParseInt(string(valueBytes), 10, 64); err == nil { + currentCount = count + } + } + } + } + + newCount := 
currentCount + 1 + counterMap[inputStr] = newCount + + newCountStr := strconv.FormatInt(newCount, 10) + putResult := store.PutState(key, cm.ToList([]byte(newCountStr))) + if putResult.IsErr() { + return + } + + outputPayload := map[string]interface{}{ + "total_processed": totalProcessed, + "counter_map": counterMap, + } + + jsonBytes, err := json.Marshal(outputPayload) + if err != nil { + return + } + + collector.Emit(0, cm.ToList(jsonBytes)) +} + +// WIT: export fs-process-watermark: func(source-id: u32, watermark: u64); +func FsProcessWatermark(sourceID uint32, watermark uint64) { + collector.EmitWatermark(0, watermark) +} + +// WIT: export fs-take-checkpoint: func(checkpoint-id: u64) -> list; +func FsTakeCheckpoint(checkpointID uint64) cm.List[uint8] { + return cm.ToList([]byte{}) +} + +// WIT: export fs-check-heartbeat: func() -> bool; +func FsCheckHeartbeat() bool { + return true +} + +// WIT: export fs-close: func(); +func FsClose() { + counterMap = make(map[string]int64) + totalProcessed = 0 + + if store != 0 { + store.ResourceDrop() + } +} + +// WIT: export fs-exec: func(class-name: string, modules: list>>); +// Note: The actual type will be determined by the WIT binding generator. +// For now, using a placeholder that matches the WIT signature. 
+func FsExec(className string, modules cm.List[[2]interface{}]) { + // modules[0] should be string (module-name) + // modules[1] should be cm.List[uint8] (module-bytes) +} + +// WIT: export fs-custom: func(payload: list) -> list; +func FsCustom(payload cm.List[uint8]) cm.List[uint8] { + return payload +} + +func main() { +} + +//go:export cabi_realloc +func cabi_realloc(ptr uintptr, old_size, align, new_size uint32) uintptr { + return 0 +} diff --git a/examples/python-processor/config.yaml b/examples/python-processor/config.yaml new file mode 100644 index 00000000..6e84130e --- /dev/null +++ b/examples/python-processor/config.yaml @@ -0,0 +1,18 @@ +name: "python-processor-example" +type: python +init_config: + emit_threshold: "6" + class_name: "CounterProcessor" +input-groups: + - inputs: + - input-type: kafka + bootstrap_servers: "localhost:9092" + topic: "input-topic" + partition: 0 + group_id: "python-processor-group" +outputs: + - output-type: kafka + bootstrap_servers: "localhost:9092" + topic: "output-topic" + partition: 0 + diff --git a/examples/python-processor/main.py b/examples/python-processor/main.py new file mode 100644 index 00000000..ab9ec6e1 --- /dev/null +++ b/examples/python-processor/main.py @@ -0,0 +1,65 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import argparse +import logging + +from fs_client.client import FsClient +from fs_client.config import WasmTaskBuilder, KafkaInput, KafkaOutput +import processor_impl + +logging.basicConfig( + level=logging.INFO, + format="%(asctime)s - [%(levelname)s] - %(message)s", + datefmt="%H:%M:%S", +) +logger = logging.getLogger(__name__) + + +def main() -> int: + parser = argparse.ArgumentParser( + description="Register python Processor via Function Stream python Client." + ) + parser.add_argument("--host", default="localhost", help="Function Stream host") + parser.add_argument("--port", type=int, default=8080, help="Function Stream port") + args = parser.parse_args() + + logger.info("Connecting to Function Stream at %s:%s", args.host, args.port) + + config = ( + WasmTaskBuilder() + .set_name("python-processor-example") + .add_init_config("emit_threshold", "6") + .add_init_config("class_name", "CounterProcessor") + .add_input_group( + [ + KafkaInput( + bootstrap_servers="localhost:9092", + topic="input-topic", + group_id="python-processor-group", + partition=0, + ) + ] + ) + .add_output(KafkaOutput("localhost:9092", "output-topic", 0)) + .build() + ) + + with FsClient(host=args.host, port=args.port) as client: + client.create_python_function_from_config(config, processor_impl.CounterProcessor) + logger.info("python processor registered successfully.") + + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/examples/python-processor/processor_impl.py b/examples/python-processor/processor_impl.py new file mode 100644 index 00000000..b45629e1 --- /dev/null +++ b/examples/python-processor/processor_impl.py @@ -0,0 +1,103 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +from typing import Dict + +from fs_api import FSProcessorDriver, Context + +class CounterProcessor(FSProcessorDriver): + """ + Real-time Counter Processor. + Function: Updates state and emits a snapshot for EVERY single message processed. + """ + def __init__(self) -> None: + self._counter_map: Dict[str, int] = {} + self._total_processed: int = 0 + self._store_name: str = "counter-store" + self._key_prefix: str = "" + + def init(self, ctx: Context, config: dict) -> None: + """Initialize the processor.""" + self._counter_map = {} + self._total_processed = 0 + + # Safe configuration parsing + if isinstance(config, dict): + # Parse Key prefix + self._key_prefix = str(config.get("key_prefix", "")) + + def process(self, ctx: Context, source_id: int, data: bytes) -> None: + """Core processing logic: One input -> One output.""" + try: + # 1. Decode data + input_str = data.decode("utf-8", errors="replace").strip() + if not input_str: + return + + self._total_processed += 1 + + # 2. State Management (Load -> Modify -> Store) + store = ctx.getOrCreateKVStore(self._store_name) + + full_key = f"{self._key_prefix}{input_str}" + store_key_bytes = full_key.encode("utf-8") + + current_count = 0 + stored_val = store.get_state(store_key_bytes) + + if stored_val: + try: + current_count = int(stored_val.decode("utf-8")) + except ValueError: + current_count = 0 + + new_count = current_count + 1 + + # Update memory and persistence + self._counter_map[input_str] = new_count + store.put_state(store_key_bytes, str(new_count).encode("utf-8")) + + # 3. 
Trigger Output Emission + # REQUIREMENT: Emit output for every single message processed + self._emit_snapshot(ctx) + + except Exception as e: + raise e + + def _emit_snapshot(self, ctx: Context): + """Helper: Serialize and emit current state.""" + output_payload = { + "total_processed": self._total_processed, + "counter_map": self._counter_map + } + + payload_bytes = json.dumps(output_payload).encode("utf-8") + ctx.emit(payload_bytes, 0) + + def process_watermark(self, ctx: Context, source_id: int, watermark: int): + ctx.emit_watermark(watermark, 0) + + def take_checkpoint(self, ctx: Context, checkpoint_id: int): + return None + + def check_heartbeat(self, ctx: Context) -> bool: + return True + + def close(self, ctx: Context): + self._counter_map = {} + self._total_processed = 0 + + def custom(self, payload: bytes) -> bytes: + return b'{"error": "Unknown command"}' + + diff --git a/examples/python-processor/serializer.py b/examples/python-processor/serializer.py new file mode 100644 index 00000000..fb9490ce --- /dev/null +++ b/examples/python-processor/serializer.py @@ -0,0 +1,142 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import pickletools +import sys +from typing import Any, Tuple + +import cloudpickle + +processor_code = ''' +import json +from typing import Dict + +from fs_api import FSProcessorDriver, Context + +class CounterProcessor(FSProcessorDriver): + """ + Real-time Counter Processor. 
+ Function: Updates state and emits a snapshot for EVERY single message processed. + """ + def __init__(self) -> None: + self._counter_map: Dict[str, int] = {} + self._total_processed: int = 0 + self._store_name: str = "counter-store" + self._key_prefix: str = "" + + def init(self, ctx: Context, config: dict) -> None: + """Initialize the processor.""" + self._counter_map = {} + self._total_processed = 0 + + # Safe configuration parsing + if isinstance(config, dict): + # Parse Key prefix + self._key_prefix = str(config.get("key_prefix", "")) + + def process(self, ctx: Context, source_id: int, data: bytes) -> None: + """Core processing logic: One input -> One output.""" + try: + # 1. Decode data + input_str = data.decode("utf-8", errors="replace").strip() + if not input_str: + return + + self._total_processed += 1 + + # 2. State Management (Load -> Modify -> Store) + store = ctx.getOrCreateKVStore(self._store_name) + + full_key = f"{self._key_prefix}{input_str}" + store_key_bytes = full_key.encode("utf-8") + + current_count = 0 + stored_val = store.get_state(store_key_bytes) + + if stored_val: + try: + current_count = int(stored_val.decode("utf-8")) + except ValueError: + current_count = 0 + + new_count = current_count + 1 + + # Update memory and persistence + self._counter_map[input_str] = new_count + store.put_state(store_key_bytes, str(new_count).encode("utf-8")) + + # 3. 
Trigger Output Emission + # REQUIREMENT: Emit output for every single message processed + self._emit_snapshot(ctx) + + except Exception as e: + raise e + + def _emit_snapshot(self, ctx: Context): + """Helper: Serialize and emit current state.""" + output_payload = { + "total_processed": self._total_processed, + "counter_map": self._counter_map + } + + payload_bytes = json.dumps(output_payload).encode("utf-8") + ctx.emit(payload_bytes, 0) + + def process_watermark(self, ctx: Context, source_id: int, watermark: int): + ctx.emit_watermark(watermark, 0) + + def take_checkpoint(self, ctx: Context, checkpoint_id: int): + return None + + def check_heartbeat(self, ctx: Context) -> bool: + return True + + def close(self, ctx: Context): + self._counter_map = {} + self._total_processed = 0 + + def custom(self, payload: bytes) -> bytes: + return b'{"error": "Unknown command"}' +''' + +def serialize_by_value(obj: Any) -> Tuple[str, bytes]: + """ + Serialize object by value (module code embedded) to avoid filesystem imports. + + Uses cloudpickle.register_pickle_by_value(module) then cloudpickle.dumps(obj). + Returns (module_name, serialized_bytes). 
+ """ + module_name = None + if hasattr(obj, "__module__"): + module_name = obj.__module__ + elif hasattr(obj, "__class__") and hasattr(obj.__class__, "__module__"): + module_name = obj.__class__.__module__ + + if not module_name or module_name == "__main__": + return (module_name or "__main__", cloudpickle.dumps(obj)) + + target_module = sys.modules.get(module_name) + if target_module is None: + raise ValueError(f"Module {module_name} not found in sys.modules") + + cloudpickle.register_pickle_by_value(target_module) + base_package = module_name.split(".")[0] + for name, mod in list(sys.modules.items()): + if mod is not None and name.startswith(base_package): + cloudpickle.register_pickle_by_value(mod) + local_env = {} + global_env = globals().copy() + exec(processor_code,local_env) + del local_env['__builtins__'] + payload = cloudpickle.dumps(local_env) + pickletools.dis(payload) + return (module_name, payload) + diff --git a/fs/api/func_ctx.go b/fs/api/func_ctx.go deleted file mode 100644 index 52a8a569..00000000 --- a/fs/api/func_ctx.go +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package api - -import ( - "context" - - "github.com/functionstream/function-stream/fs/contube" -) - -type FunctionContext interface { - PutState(ctx context.Context, key string, value []byte) error - GetState(ctx context.Context, key string) ([]byte, error) - ListStates(ctx context.Context, startInclusive string, endExclusive string) ([]string, error) - DeleteState(ctx context.Context, key string) error - Write(record contube.Record) error - GetConfig() map[string]string -} diff --git a/fs/api/instance.go b/fs/api/instance.go deleted file mode 100644 index 01e27f8f..00000000 --- a/fs/api/instance.go +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package api - -import ( - "github.com/functionstream/function-stream/common" - "github.com/functionstream/function-stream/common/model" - "github.com/functionstream/function-stream/fs/contube" - "golang.org/x/net/context" -) - -type FunctionInstance interface { - Context() context.Context - FunctionContext() FunctionContext - Definition() *model.Function - Index() int32 - Stop() - Run(runtime FunctionRuntime, sources []<-chan contube.Record, sink chan<- contube.Record) - Logger() *common.Logger -} - -type FunctionInstanceFactory interface { - NewFunctionInstance(f *model.Function, funcCtx FunctionContext, i int32, logger *common.Logger) FunctionInstance -} diff --git a/fs/api/package.go b/fs/api/package.go deleted file mode 100644 index b5d5ed33..00000000 --- a/fs/api/package.go +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package api - -import "github.com/functionstream/function-stream/common/model" - -type Package interface { - GetSupportedRuntimeConfig() []model.RuntimeConfig -} - -type PackageLoader interface { - Load(path string) (Package, error) -} diff --git a/fs/api/runtime.go b/fs/api/runtime.go deleted file mode 100644 index 03cde4cf..00000000 --- a/fs/api/runtime.go +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package api - -import ( - "github.com/functionstream/function-stream/common/model" - "github.com/functionstream/function-stream/fs/contube" -) - -type FunctionRuntime interface { - Call(e contube.Record) (contube.Record, error) - Stop() -} - -type FunctionRuntimeFactory interface { - NewFunctionRuntime(instance FunctionInstance, rc *model.RuntimeConfig) (FunctionRuntime, error) -} diff --git a/fs/api/statestore.go b/fs/api/statestore.go deleted file mode 100644 index 859696bd..00000000 --- a/fs/api/statestore.go +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package api - -import ( - "context" - - "github.com/functionstream/function-stream/common/model" - - "github.com/pkg/errors" -) - -var ErrNotFound = errors.New("key not found") - -type StateStore interface { - PutState(ctx context.Context, key string, value []byte) error - GetState(ctx context.Context, key string) (value []byte, err error) - ListStates(ctx context.Context, startInclusive string, endExclusive string) (keys []string, err error) - DeleteState(ctx context.Context, key string) error - Close() error -} - -type StateStoreFactory interface { - NewStateStore(f *model.Function) (StateStore, error) - Close() error -} diff --git a/fs/contube/contube.go b/fs/contube/contube.go deleted file mode 100644 index a0aaae46..00000000 --- a/fs/contube/contube.go +++ /dev/null @@ -1,188 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package contube - -import ( - "context" - "encoding/json" - - "github.com/go-playground/validator/v10" - "github.com/pkg/errors" -) - -var ( - ErrTubeNotImplemented = errors.New("tube not implemented") - ErrSinkTubeNotImplemented = errors.Wrap(ErrTubeNotImplemented, "sink tube not implemented") - ErrSourceTubeNotImplemented = errors.Wrap(ErrTubeNotImplemented, "source tube not implemented") -) - -type Record interface { - GetPayload() []byte - GetSchema() string - Commit() -} - -type SourceQueueConfig struct { - Topics []string `json:"inputs" validate:"required"` - SubName string `json:"subscription-name" validate:"required"` -} - -type SinkQueueConfig struct { - Topic string `json:"output" validate:"required"` -} - -func (c *SourceQueueConfig) ToConfigMap() ConfigMap { - configMap, _ := ToConfigMap(c) - return configMap -} - -func (c *SinkQueueConfig) ToConfigMap() ConfigMap { - configMap, _ := ToConfigMap(c) - return configMap -} - -type ConfigMap map[string]interface{} - -// MergeConfig merges multiple ConfigMap into one -func MergeConfig(configs ...ConfigMap) ConfigMap { - result := ConfigMap{} - for _, config := range configs { - for k, v := range config { - result[k] = v - } - } - return result -} - -func (c ConfigMap) ToConfigStruct(v any) error { - jsonData, err := json.Marshal(c) - if err != nil { - return err - } - if err := json.Unmarshal(jsonData, v); err != nil { - return err - } - validate := validator.New() - return validate.Struct(v) -} - -func ToConfigMap(v any) (ConfigMap, error) { - jsonData, err := json.Marshal(v) - if err != nil { - return nil, err - } - var result ConfigMap - if err := json.Unmarshal(jsonData, &result); err != nil { - return nil, err - } - return result, nil -} - -type SourceTubeFactory interface { - // NewSourceTube returns a new channel that can be used to receive events - // The channel would be closed when the context is done - NewSourceTube(ctx context.Context, config ConfigMap) (<-chan Record, error) -} - -type 
SinkTubeFactory interface { - // NewSinkTube returns a new channel that can be used to sink events - // The event.Commit() would be invoked after the event is sunk successfully - // The caller should close the channel when it is done - NewSinkTube(ctx context.Context, config ConfigMap) (chan<- Record, error) -} - -type TubeFactory interface { - SourceTubeFactory - SinkTubeFactory -} - -type RecordImpl struct { - payload []byte - schema string - commitFunc func() -} - -func NewRecordImpl(payload []byte, ackFunc func()) *RecordImpl { - return &RecordImpl{ - payload: payload, - commitFunc: ackFunc, - } -} - -func NewStructRecord(payload any, ackFunc func()) (*RecordImpl, error) { - data, err := json.Marshal(payload) - if err != nil { - return nil, err - } - return &RecordImpl{ - payload: data, - commitFunc: ackFunc, - }, nil -} - -func NewSchemaRecordImpl(payload []byte, schema string, ackFunc func()) *RecordImpl { - return &RecordImpl{ - payload: payload, - schema: schema, - commitFunc: ackFunc, - } -} - -func (e *RecordImpl) GetPayload() []byte { - return e.payload -} - -func (e *RecordImpl) GetSchema() string { - return e.schema -} - -func (e *RecordImpl) Commit() { - if e.commitFunc != nil { - e.commitFunc() - } -} - -type emptyTubeFactory struct { -} - -func NewEmptyTubeFactory() TubeFactory { - return &emptyTubeFactory{} -} - -func (f *emptyTubeFactory) NewSourceTube(ctx context.Context, config ConfigMap) (<-chan Record, error) { - ch := make(chan Record) - go func() { - <-ctx.Done() - close(ch) - }() - return ch, nil -} - -func (f *emptyTubeFactory) NewSinkTube(ctx context.Context, config ConfigMap) (chan<- Record, error) { - ch := make(chan Record) - go func() { - for { - select { - case <-ctx.Done(): - return - case <-ch: - continue - } - } - }() - return ch, nil -} diff --git a/fs/contube/http.go b/fs/contube/http.go deleted file mode 100644 index 0816f950..00000000 --- a/fs/contube/http.go +++ /dev/null @@ -1,174 +0,0 @@ -/* - * Copyright 2024 Function 
Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package contube - -import ( - "io" - "net/http" - "sync" - "sync/atomic" - - "github.com/functionstream/function-stream/common" - - "github.com/pkg/errors" - "golang.org/x/net/context" -) - -type state int - -const ( - EndpointKey = "endpoint" - IncludeMetadata = "includeMetadata" - - stateReady state = iota // TODO: Why do we need this? Maybe we can remove it. - stateClosed state = iota -) - -var ( - ErrEndpointNotFound = errors.New("endpoint not found") - ErrEndpointClosed = errors.New("endpoint closed") - ErrorEndpointAlreadyExists = errors.New("endpoint already exists") -) - -type EndpointHandler func(ctx context.Context, endpoint string, payload []byte) error -type HttpHandler func(w http.ResponseWriter, r *http.Request, payload []byte) Record - -func DefaultHttpHandler(_ http.ResponseWriter, _ *http.Request, payload []byte) Record { - return NewRecordImpl(payload, func() {}) -} - -type endpointHandler struct { - ctx context.Context - s atomic.Value - c chan Record -} - -type HttpTubeFactory struct { - TubeFactory - ctx context.Context - mu sync.RWMutex - endpoints map[string]*endpointHandler - handler HttpHandler -} - -func NewHttpTubeFactory(ctx context.Context) *HttpTubeFactory { - return NewHttpTubeFactoryWithIntercept(ctx, DefaultHttpHandler) -} - -func NewHttpTubeFactoryWithIntercept(ctx context.Context, handler HttpHandler) *HttpTubeFactory { - return &HttpTubeFactory{ - ctx: 
ctx, - endpoints: make(map[string]*endpointHandler), - handler: handler, - } -} - -type httpSourceTubeConfig struct { - Endpoint string `json:"endpoint" validate:"required"` -} - -func (f *HttpTubeFactory) getEndpoint(endpoint string) (*endpointHandler, bool) { - f.mu.RLock() - defer f.mu.RUnlock() - e, ok := f.endpoints[endpoint] - return e, ok -} - -func (f *HttpTubeFactory) Handle(ctx context.Context, endpoint string, record Record) error { - e, ok := f.getEndpoint(endpoint) - if !ok { - return ErrEndpointNotFound - } - if e.s.Load() == stateClosed { - return ErrEndpointClosed - } - select { - case e.c <- record: - return nil - case <-ctx.Done(): - return ctx.Err() - case <-e.ctx.Done(): - return ErrEndpointClosed - } -} - -func (f *HttpTubeFactory) NewSourceTube(ctx context.Context, config ConfigMap) (<-chan Record, error) { - c := httpSourceTubeConfig{} - if err := config.ToConfigStruct(&c); err != nil { - return nil, err - } - result := make(chan Record, 10) - f.mu.Lock() - defer f.mu.Unlock() - if _, ok := f.endpoints[c.Endpoint]; ok { - return nil, ErrorEndpointAlreadyExists - } - var s atomic.Value - s.Store(stateReady) - handlerCtx, cancel := context.WithCancel(f.ctx) - e := &endpointHandler{ - c: result, - s: s, - ctx: handlerCtx, - } - f.endpoints[c.Endpoint] = e - go func() { - <-ctx.Done() - cancel() - close(result) - f.mu.Lock() - defer f.mu.Unlock() - delete(f.endpoints, c.Endpoint) - }() - return result, nil -} - -func (f *HttpTubeFactory) NewSinkTube(_ context.Context, _ ConfigMap) (chan<- Record, error) { - return nil, ErrSinkTubeNotImplemented -} - -func (f *HttpTubeFactory) GetHandleFunc(getEndpoint func(r *http.Request) (string, error), - logger *common.Logger) func(http.ResponseWriter, *http.Request) { - return func(w http.ResponseWriter, r *http.Request) { - endpoint, err := getEndpoint(r) - if err != nil { - logger.Error(err, "Failed to get endpoint") - http.Error(w, errors.Wrap(err, "Failed to get endpoint").Error(), http.StatusBadRequest) 
- return - } - log := logger.SubLogger("endpoint", endpoint, "component", "http-tube") - if log.DebugEnabled() { - log.Debug("Handle records from http request") - } - content, err := io.ReadAll(r.Body) - if err != nil { - log.Error(err, "Failed to read body") - http.Error(w, errors.Wrap(err, "Failed to read body").Error(), http.StatusBadRequest) - return - } - record := f.handler(w, r, content) - err = f.Handle(r.Context(), endpoint, record) - if err != nil { - log.Error(err, "Failed to handle record") - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - if log.DebugEnabled() { - log.Debug("Handled records from http request") - } - } -} diff --git a/fs/contube/http_test.go b/fs/contube/http_test.go deleted file mode 100644 index 5883c02e..00000000 --- a/fs/contube/http_test.go +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package contube - -import ( - "testing" - "time" - - "github.com/stretchr/testify/assert" - "golang.org/x/net/context" -) - -func TestHttpTubeHandleRecord(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - f := NewHttpTubeFactory(ctx) - - endpoint := "test" - err := f.Handle(ctx, "test", NewRecordImpl([]byte("test"), nil)) - assert.ErrorIs(t, err, ErrEndpointNotFound) - - config := make(ConfigMap) - config[EndpointKey] = endpoint - source, err := f.NewSourceTube(ctx, config) - assert.NoError(t, err) - _, err = f.NewSourceTube(ctx, config) - assert.ErrorIs(t, err, ErrorEndpointAlreadyExists) - - err = f.Handle(ctx, endpoint, NewRecordImpl([]byte("test"), nil)) - assert.Nil(t, err) - - record := <-source - assert.Equal(t, "test", string(record.GetPayload())) - - cancel() - - assert.Nil(t, <-source) - err = f.Handle(ctx, endpoint, NewRecordImpl([]byte("test"), nil)) - assert.Error(t, err, ErrEndpointNotFound) -} - -func TestHttpTubeSinkTubeNotImplement(t *testing.T) { - f := NewHttpTubeFactory(context.Background()) - _, err := f.NewSinkTube(context.Background(), make(ConfigMap)) - assert.ErrorIs(t, err, ErrSinkTubeNotImplemented) -} diff --git a/fs/contube/memory.go b/fs/contube/memory.go deleted file mode 100644 index 57ac7bd5..00000000 --- a/fs/contube/memory.go +++ /dev/null @@ -1,139 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package contube - -import ( - "context" - "log/slog" - "sync" - "sync/atomic" -) - -type queue struct { - c chan Record - refCnt int32 -} - -type MemoryQueueFactory struct { - ctx context.Context - mu sync.Mutex - queues map[string]*queue -} - -func NewMemoryQueueFactory(ctx context.Context) TubeFactory { - return &MemoryQueueFactory{ - ctx: ctx, - queues: make(map[string]*queue), - } -} - -func (f *MemoryQueueFactory) getOrCreateChan(name string) chan Record { - f.mu.Lock() - defer f.mu.Unlock() - defer func() { - slog.DebugContext(f.ctx, "Get memory queue chan", - "current_use_count", atomic.LoadInt32(&f.queues[name].refCnt), - "name", name) - }() - if q, ok := f.queues[name]; ok { - atomic.AddInt32(&q.refCnt, 1) - return q.c - } - c := make(chan Record, 100) - f.queues[name] = &queue{ - c: c, - refCnt: 1, - } - return c -} - -func (f *MemoryQueueFactory) release(name string) { - f.mu.Lock() - defer f.mu.Unlock() - q, ok := f.queues[name] - if !ok { - panic("release non-exist queue: " + name) - } - if atomic.AddInt32(&q.refCnt, -1) == 0 { - close(q.c) - delete(f.queues, name) - } - slog.DebugContext(f.ctx, "Released memory queue", - "current_use_count", atomic.LoadInt32(&q.refCnt), - "name", name) -} - -func (f *MemoryQueueFactory) NewSourceTube(ctx context.Context, configMap ConfigMap) (<-chan Record, error) { - config := SourceQueueConfig{} - if err := configMap.ToConfigStruct(&config); err != nil { - return nil, err - } - result := make(chan Record) - - var wg sync.WaitGroup - for _, topic := range config.Topics { - t := topic - wg.Add(1) - go func() { - <-ctx.Done() - f.release(t) - }() - c := f.getOrCreateChan(t) - go func() { - defer wg.Done() - for { - select { - case <-ctx.Done(): - return - case event := <-c: - result <- event - } - } - }() - } - - go func() { - wg.Wait() - close(result) - }() - - return result, nil -} - -func (f *MemoryQueueFactory) NewSinkTube(ctx context.Context, configMap ConfigMap) (chan<- Record, error) { - config := 
SinkQueueConfig{} - if err := configMap.ToConfigStruct(&config); err != nil { - return nil, err - } - c := f.getOrCreateChan(config.Topic) - wrapperC := make(chan Record) - go func() { - defer f.release(config.Topic) - for { - select { - case <-ctx.Done(): - return - case event, ok := <-wrapperC: - if !ok { - return - } - c <- event - } - } - }() - return wrapperC, nil -} diff --git a/fs/contube/memory_test.go b/fs/contube/memory_test.go deleted file mode 100644 index f3627bad..00000000 --- a/fs/contube/memory_test.go +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package contube - -import ( - "context" - "math/rand" - "strconv" - "sync" - "testing" - "time" -) - -func TestMemoryTube(t *testing.T) { - - ctx, cancel := context.WithCancel(context.Background()) - tubeFactory := NewMemoryQueueFactory(ctx) - memoryQueueFactory := tubeFactory.(*MemoryQueueFactory) - - var wg sync.WaitGroup - var events []Record - - topics := []string{"topic1", "topic2", "topic3"} - source, err := memoryQueueFactory.NewSourceTube(ctx, (&SourceQueueConfig{Topics: topics, - SubName: "consume-" + strconv.Itoa(rand.Int())}).ToConfigMap()) - if err != nil { - t.Fatal(err) - } - - for i, v := range topics { - wg.Add(1) - sink, err := memoryQueueFactory.NewSinkTube(ctx, (&SinkQueueConfig{Topic: v}).ToConfigMap()) - if err != nil { - t.Fatal(err) - } - go func(i int) { - defer wg.Done() - defer close(sink) - sink <- NewRecordImpl([]byte{byte(i + 1)}, func() {}) - }(i) - } - - wg.Add(1) - go func() { - defer wg.Done() - for { - select { - case event := <-source: - events = append(events, event) - if len(events) == len(topics) { - return - } - default: - continue - } - } - }() - - wg.Wait() - cancel() - - // Give enough time to ensure that the goroutine execution within NewSource Tube and NewSinkTube is complete and - // the released queue is successful. - time.Sleep(100 * time.Millisecond) - - // assert the memoryQueueFactory.queues is empty. - memoryQueueFactory.mu.Lock() - if len(memoryQueueFactory.queues) != 0 { - t.Fatal("MemoryQueueFactory.queues is not empty") - } - memoryQueueFactory.mu.Unlock() - -} diff --git a/fs/contube/nats.go b/fs/contube/nats.go deleted file mode 100644 index 598a7592..00000000 --- a/fs/contube/nats.go +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package contube - -import ( - "context" - "time" - - "github.com/functionstream/function-stream/common" - "github.com/nats-io/nats.go" - "github.com/pkg/errors" -) - -type NatsTubeFactoryConfig struct { - NatsURL string `json:"nats_url"` -} - -type NatsEventQueueFactory struct { - nc *nats.Conn -} - -type NatsSourceTubeConfig struct { - Subject string `json:"subject" validate:"required"` -} - -func (n NatsEventQueueFactory) NewSourceTube(ctx context.Context, configMap ConfigMap) (<-chan Record, error) { - config := &NatsSourceTubeConfig{} - if err := configMap.ToConfigStruct(config); err != nil { - return nil, err - } - c := make(chan Record) - sub, err := n.nc.SubscribeSync(config.Subject) - if err != nil { - return nil, err - } - log := common.NewDefaultLogger() - go func() { - for { - msg, err := sub.NextMsg(10 * time.Millisecond) - if err != nil { - if !errors.Is(err, nats.ErrTimeout) { - log.Error(err, "Failed to get next message", "subject", config.Subject) - } - continue - } - select { - case c <- NewRecordImpl(msg.Data, func() { - _ = msg.Ack() - }): // do nothing - case <-ctx.Done(): - return - } - } - }() - return c, nil -} - -type NatsSinkTubeConfig struct { - Subject string `json:"subject" validate:"required"` -} - -func (n NatsEventQueueFactory) NewSinkTube(ctx context.Context, configMap ConfigMap) (chan<- Record, error) { - config := &NatsSinkTubeConfig{} - if err := configMap.ToConfigStruct(config); err != nil { - return nil, err - } - c := make(chan Record) - log := common.NewDefaultLogger() - go func() { - for { - select { 
- case <-ctx.Done(): - return - case event, ok := <-c: - if !ok { - return - } - err := n.nc.Publish(config.Subject, event.GetPayload()) - log.Info("Published message", "subject", config.Subject, "err", err) - if err != nil { - log.Error(err, "Failed to publish message", "subject", config.Subject) - continue - } - event.Commit() - } - } - }() - return c, nil -} - -func NewNatsEventQueueFactory(ctx context.Context, configMap ConfigMap) (TubeFactory, error) { - config := &NatsTubeFactoryConfig{} - if err := configMap.ToConfigStruct(config); err != nil { - return nil, err - } - if config.NatsURL == "" { - config.NatsURL = "nats://localhost:4222" - } - nc, err := nats.Connect(config.NatsURL) - if err != nil { - return nil, err - } - log := common.NewDefaultLogger() - go func() { - <-ctx.Done() - // Close the nats queue factory - log.Info("Closing nats queue factory", "url", config.NatsURL) - err := nc.Drain() - if err != nil { - log.Error(err, "Failed to drain nats connection", "url", config.NatsURL) - } - }() - return &NatsEventQueueFactory{ - nc: nc, - }, nil -} diff --git a/fs/contube/pulsar.go b/fs/contube/pulsar.go deleted file mode 100644 index 8470d161..00000000 --- a/fs/contube/pulsar.go +++ /dev/null @@ -1,182 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package contube - -import ( - "context" - "log/slog" - "sync/atomic" - - "github.com/apache/pulsar-client-go/pulsar" - "github.com/pkg/errors" -) - -const ( - PulsarURLKey = "pulsar_url" -) - -type PulsarTubeFactoryConfig struct { - PulsarURL string -} - -func NewPulsarTubeFactoryConfig(configMap ConfigMap) (*PulsarTubeFactoryConfig, error) { - var result PulsarTubeFactoryConfig - if pulsarURL, ok := configMap[PulsarURLKey].(string); ok { - result.PulsarURL = pulsarURL - } else { - result.PulsarURL = "pulsar://localhost:6650" - } - return &result, nil -} - -func (c *PulsarTubeFactoryConfig) ToConfigMap() ConfigMap { - return ConfigMap{ - PulsarURLKey: c.PulsarURL, - } -} - -type PulsarEventQueueFactory struct { - newSourceChan func(ctx context.Context, config *SourceQueueConfig) (<-chan Record, error) - newSinkChan func(ctx context.Context, config *SinkQueueConfig) (chan<- Record, error) -} - -func (f *PulsarEventQueueFactory) NewSourceTube(ctx context.Context, configMap ConfigMap) (<-chan Record, error) { - config := SourceQueueConfig{} - if err := configMap.ToConfigStruct(&config); err != nil { - return nil, err - } - return f.newSourceChan(ctx, &config) -} - -func (f *PulsarEventQueueFactory) NewSinkTube(ctx context.Context, configMap ConfigMap) (chan<- Record, error) { - config := SinkQueueConfig{} - if err := configMap.ToConfigStruct(&config); err != nil { - return nil, err - } - return f.newSinkChan(ctx, &config) -} - -func NewPulsarEventQueueFactory(ctx context.Context, configMap ConfigMap) (TubeFactory, error) { - config, err := NewPulsarTubeFactoryConfig(configMap) - if err != nil { - return nil, err - } - pc, err := pulsar.NewClient(pulsar.ClientOptions{ - URL: config.PulsarURL, - }) - if err != nil { - return nil, err - } - var closed atomic.Bool // TODO: Remove this after the bug of Producer.Flush is fixed - go func() { - <-ctx.Done() - slog.InfoContext(ctx, "Closing Pulsar event queue factory", slog.Any("config", config)) - closed.Store(true) - 
pc.Close() - }() - handleErr := func(ctx context.Context, err error, message string, args ...interface{}) { - if errors.Is(err, context.Canceled) { - slog.InfoContext(ctx, "Pulsar queue cancelled", slog.Any("config", config)) - return - } - extraArgs := append(args, slog.Any("config", config), slog.Any("error", err)) - slog.ErrorContext(ctx, message, extraArgs...) - } - log := func(message string, config interface{}, args ...interface{}) { - slog.InfoContext(ctx, message, append(args, slog.Any("config", config))...) - } - return &PulsarEventQueueFactory{ - newSourceChan: func(ctx context.Context, config *SourceQueueConfig) (<-chan Record, error) { - c := make(chan Record) - consumer, err := pc.Subscribe(pulsar.ConsumerOptions{ - Topics: config.Topics, - SubscriptionName: config.SubName, - Type: pulsar.Failover, - }) - if err != nil { - return nil, errors.Wrap(err, "Error creating consumer") - } - log("Pulsar source queue created", config) - go func() { - defer log("Pulsar source queue closed", config) - defer consumer.Close() - defer close(c) - for msg := range consumer.Chan() { - c <- NewRecordImpl(msg.Payload(), func() { - err := consumer.Ack(msg) - if err != nil { - handleErr(ctx, err, "Error acknowledging message", "error", err) - return - } - }) - } - }() - return c, nil - }, - newSinkChan: func(ctx context.Context, config *SinkQueueConfig) (chan<- Record, error) { - c := make(chan Record) - producer, err := pc.CreateProducer(pulsar.ProducerOptions{ - Topic: config.Topic, - }) - if err != nil { - return nil, errors.Wrap(err, "Error creating producer") - } - log("Pulsar sink queue created", config) - go func() { - defer log("Pulsar sink queue closed", config) - defer producer.Close() - flush := func() { - if closed.Load() { - return - } - err := producer.Flush() - if err != nil { - handleErr(ctx, err, "Error flushing producer", "error", err) - } - } - for { - select { - case e, ok := <-c: - if !ok { - flush() - return - } - schemaDef := e.GetSchema() - var 
schema pulsar.Schema - if schemaDef != "" { - schema = pulsar.NewJSONSchema(schemaDef, nil) - } - producer.SendAsync(ctx, &pulsar.ProducerMessage{ - Payload: e.GetPayload(), - Schema: schema, - }, func(id pulsar.MessageID, message *pulsar.ProducerMessage, err error) { - if err != nil { - handleErr(ctx, err, "Error sending message", "error", err, "messageId", id) - return - } - e.Commit() - }) - case <-ctx.Done(): - flush() - return - } - } - }() - return c, nil - }, - }, nil -} diff --git a/fs/func_ctx_impl.go b/fs/func_ctx_impl.go deleted file mode 100644 index c57d6971..00000000 --- a/fs/func_ctx_impl.go +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package fs - -import ( - "context" - - "github.com/functionstream/function-stream/common/model" - - "github.com/functionstream/function-stream/fs/api" - "github.com/functionstream/function-stream/fs/contube" - "github.com/pkg/errors" -) - -var ErrStateStoreNotLoaded = errors.New("state store not loaded") - -type funcCtxImpl struct { - api.FunctionContext - function *model.Function - stateStore api.StateStore - sink chan<- contube.Record -} - -func newFuncCtxImpl(function *model.Function, store api.StateStore) *funcCtxImpl { - return &funcCtxImpl{function: function, stateStore: store} -} - -func (f *funcCtxImpl) checkStateStore() error { - if f.stateStore == nil { - return ErrStateStoreNotLoaded - } - return nil -} - -func (f *funcCtxImpl) PutState(ctx context.Context, key string, value []byte) error { - if err := f.checkStateStore(); err != nil { - return err - } - return f.stateStore.PutState(ctx, key, value) -} - -func (f *funcCtxImpl) GetState(ctx context.Context, key string) ([]byte, error) { - if err := f.checkStateStore(); err != nil { - return nil, err - } - return f.stateStore.GetState(ctx, key) -} - -func (f *funcCtxImpl) ListStates(ctx context.Context, startInclusive string, endExclusive string) ([]string, error) { - if err := f.checkStateStore(); err != nil { - return nil, err - } - return f.stateStore.ListStates(ctx, startInclusive, endExclusive) -} - -func (f *funcCtxImpl) DeleteState(ctx context.Context, key string) error { - if err := f.checkStateStore(); err != nil { - return err - } - return f.stateStore.DeleteState(ctx, key) -} - -func (f *funcCtxImpl) Write(record contube.Record) error { - if f.sink == nil { - return errors.New("sink not set") - } - f.sink <- record - return nil -} - -func (f *funcCtxImpl) GetConfig() map[string]string { - return f.function.Config -} - -func (f *funcCtxImpl) setSink(sink chan<- contube.Record) { - f.sink = sink -} diff --git a/fs/func_ctx_impl_test.go b/fs/func_ctx_impl_test.go deleted file mode 100644 
index e6eec03b..00000000 --- a/fs/func_ctx_impl_test.go +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package fs - -import ( - "context" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestFuncCtx_NilStore(t *testing.T) { - f := newFuncCtxImpl(nil, nil) - assert.ErrorIs(t, f.PutState(context.Background(), "key", []byte("value")), ErrStateStoreNotLoaded) - _, err := f.GetState(context.Background(), "key") - assert.ErrorIs(t, err, ErrStateStoreNotLoaded) -} diff --git a/fs/instance_impl.go b/fs/instance_impl.go deleted file mode 100644 index b5b0e046..00000000 --- a/fs/instance_impl.go +++ /dev/null @@ -1,155 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package fs - -import ( - "context" - "reflect" - - "github.com/functionstream/function-stream/common" - "github.com/functionstream/function-stream/common/model" - "github.com/functionstream/function-stream/fs/api" - "github.com/functionstream/function-stream/fs/contube" - "github.com/pkg/errors" -) - -type FunctionInstanceImpl struct { - ctx context.Context - funcCtx api.FunctionContext - cancelFunc context.CancelFunc - definition *model.Function - readyCh chan error - index int32 - logger *common.Logger -} - -type CtxKey string - -const ( - CtxKeyFunctionName CtxKey = "function-name" - CtxKeyInstanceIndex CtxKey = "instance-index" -) - -type DefaultInstanceFactory struct { - api.FunctionInstanceFactory -} - -func NewDefaultInstanceFactory() api.FunctionInstanceFactory { - return &DefaultInstanceFactory{} -} - -func (f *DefaultInstanceFactory) NewFunctionInstance(definition *model.Function, funcCtx api.FunctionContext, - index int32, logger *common.Logger) api.FunctionInstance { - ctx, cancelFunc := context.WithCancel(context.Background()) - ctx = context.WithValue(ctx, CtxKeyFunctionName, definition.Name) - ctx = context.WithValue(ctx, CtxKeyInstanceIndex, index) - return &FunctionInstanceImpl{ - ctx: ctx, - funcCtx: funcCtx, - cancelFunc: cancelFunc, - definition: definition, - readyCh: make(chan error), - index: index, - logger: logger, - } -} - -func (instance *FunctionInstanceImpl) Run(runtime api.FunctionRuntime, sources []<-chan contube.Record, - sink chan<- contube.Record) { - logger := instance.logger - defer close(sink) - - defer logger.Info("function instance has been stopped") - - logger.Info("function instance is running") - - logCounter := common.LogCounter() - channels := make([]reflect.SelectCase, len(sources)+1) - for i, s := range sources { - channels[i] = reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(s)} - } - channels[len(sources)] = reflect.SelectCase{Dir: reflect.SelectRecv, Chan: 
reflect.ValueOf(instance.ctx.Done())} - - for len(channels) > 0 { - // Use reflect.Select to select a channel from the slice - chosen, value, ok := reflect.Select(channels) - if !ok { - // The selected channel has been closed, remove it from the slice - channels = append(channels[:chosen], channels[chosen+1:]...) - continue - } - - // Convert the selected value to the type Record - record := value.Interface().(contube.Record) - if logger.DebugEnabled() { - logger.Debug("Calling process function", "count", logCounter) - } - - // Call the processing function - output, err := runtime.Call(record) - if err != nil { - if errors.Is(err, context.Canceled) { - return - } - // Log the error if there's an issue with the processing function - logger.Error(err, "failed to process record") - return - } - - // If the output is nil, continue with the next iteration - if output == nil { - continue - } - - // Try to send the output to the sink, but also listen to the context's Done channel - select { - case sink <- output: - case <-instance.ctx.Done(): - return - } - - // If the selected channel is the context's Done channel, exit the loop - if chosen == len(channels)-1 { - return - } - } -} - -func (instance *FunctionInstanceImpl) Stop() { - instance.logger.Info("stopping function instance") - instance.cancelFunc() -} - -func (instance *FunctionInstanceImpl) Context() context.Context { - return instance.ctx -} - -func (instance *FunctionInstanceImpl) FunctionContext() api.FunctionContext { - return instance.funcCtx -} - -func (instance *FunctionInstanceImpl) Definition() *model.Function { - return instance.definition -} - -func (instance *FunctionInstanceImpl) Index() int32 { - return instance.index -} - -func (instance *FunctionInstanceImpl) Logger() *common.Logger { - return instance.logger -} diff --git a/fs/instance_impl_test.go b/fs/instance_impl_test.go deleted file mode 100644 index 81362f43..00000000 --- a/fs/instance_impl_test.go +++ /dev/null @@ -1,47 +0,0 @@ -/* - * 
Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package fs - -import ( - "testing" - - "github.com/functionstream/function-stream/common" - - "github.com/functionstream/function-stream/common/model" -) - -func TestFunctionInstanceContextSetting(t *testing.T) { - defaultInstanceFactory := DefaultInstanceFactory{} - definition := &model.Function{ - Name: "test-function", - } - index := int32(1) - instance := defaultInstanceFactory.NewFunctionInstance(definition, nil, index, common.NewDefaultLogger()) - - if instance == nil { - t.Error("FunctionInstance should not be nil") - } - - if ctxValue, ok := instance.Context().Value(CtxKeyFunctionName).(string); !ok || ctxValue != definition.Name { - t.Errorf("Expected '%s' in ctx to be '%s'", CtxKeyFunctionName, definition.Name) - } - - if ctxValue, ok := instance.Context().Value(CtxKeyInstanceIndex).(int32); !ok || ctxValue != index { - t.Errorf("Expected '%s' in ctx to be '%d'", CtxKeyInstanceIndex, index) - } - -} diff --git a/fs/manager.go b/fs/manager.go deleted file mode 100644 index 4a7bc956..00000000 --- a/fs/manager.go +++ /dev/null @@ -1,366 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package fs - -import ( - "context" - "fmt" - "math/rand" - "strconv" - "sync" - - "github.com/functionstream/function-stream/fs/statestore" - - "github.com/functionstream/function-stream/common/config" - _package "github.com/functionstream/function-stream/fs/package" - - "github.com/functionstream/function-stream/common" - "github.com/functionstream/function-stream/common/model" - "github.com/functionstream/function-stream/fs/api" - "github.com/functionstream/function-stream/fs/contube" - "github.com/go-logr/logr" - "github.com/pkg/errors" -) - -type FunctionManager interface { - StartFunction(f *model.Function) error - DeleteFunction(namespace, name string) error - ListFunctions() []string - ProduceEvent(name string, event contube.Record) error - ConsumeEvent(name string) (contube.Record, error) - GetStateStore() (api.StateStore, error) - Close() error -} - -type functionManagerImpl struct { - options *managerOptions - functions map[common.NamespacedName][]api.FunctionInstance //TODO: Use sync.map - functionsLock sync.Mutex - log *common.Logger -} - -type managerOptions struct { - tubeFactoryMap map[string]contube.TubeFactory - runtimeFactoryMap map[string]api.FunctionRuntimeFactory - instanceFactory api.FunctionInstanceFactory - stateStoreFactory api.StateStoreFactory - queueFactory contube.TubeFactory - packageLoader api.PackageLoader // TODO: Need to set it - log *logr.Logger -} - -type ManagerOption interface { - apply(option *managerOptions) (*managerOptions, error) -} - -type managerOptionFunc func(*managerOptions) (*managerOptions, 
error) - -func (f managerOptionFunc) apply(c *managerOptions) (*managerOptions, error) { - return f(c) -} - -func WithTubeFactory(name string, factory contube.TubeFactory) ManagerOption { - return managerOptionFunc(func(c *managerOptions) (*managerOptions, error) { - c.tubeFactoryMap[name] = factory - return c, nil - }) -} -func WithQueueFactory(factory contube.TubeFactory) ManagerOption { - return managerOptionFunc(func(c *managerOptions) (*managerOptions, error) { - c.queueFactory = factory - return c, nil - }) -} - -func WithRuntimeFactory(name string, factory api.FunctionRuntimeFactory) ManagerOption { - return managerOptionFunc(func(c *managerOptions) (*managerOptions, error) { - c.runtimeFactoryMap[name] = factory - return c, nil - }) -} - -func WithInstanceFactory(factory api.FunctionInstanceFactory) ManagerOption { - return managerOptionFunc(func(c *managerOptions) (*managerOptions, error) { - c.instanceFactory = factory - return c, nil - }) -} - -func WithStateStoreFactory(storeFactory api.StateStoreFactory) ManagerOption { - return managerOptionFunc(func(c *managerOptions) (*managerOptions, error) { - c.stateStoreFactory = storeFactory - return c, nil - }) -} - -func WithLogger(log *logr.Logger) ManagerOption { - return managerOptionFunc(func(c *managerOptions) (*managerOptions, error) { - c.log = log - return c, nil - }) -} - -func WithPackageLoader(loader api.PackageLoader) ManagerOption { - return managerOptionFunc(func(c *managerOptions) (*managerOptions, error) { - c.packageLoader = loader - return c, nil - }) -} - -func NewFunctionManager(opts ...ManagerOption) (FunctionManager, error) { - options := &managerOptions{ - tubeFactoryMap: make(map[string]contube.TubeFactory), - runtimeFactoryMap: make(map[string]api.FunctionRuntimeFactory), - } - options.instanceFactory = NewDefaultInstanceFactory() - for _, o := range opts { - _, err := o.apply(options) - if err != nil { - return nil, err - } - } - var log *common.Logger - if options.log == nil { - log 
= common.NewDefaultLogger() - } else { - log = common.NewLogger(options.log) - } - loadedRuntimeFact := make([]string, 0, len(options.runtimeFactoryMap)) - for k := range options.runtimeFactoryMap { - loadedRuntimeFact = append(loadedRuntimeFact, k) - } - loadedTubeFact := make([]string, 0, len(options.tubeFactoryMap)) - for k := range options.tubeFactoryMap { - loadedTubeFact = append(loadedTubeFact, k) - } - if options.packageLoader == nil { - options.packageLoader = _package.NewDefaultPackageLoader() - } - if options.stateStoreFactory == nil { - if fact, err := statestore.NewDefaultPebbleStateStoreFactory(); err != nil { - return nil, err - } else { - options.stateStoreFactory = fact - } - } - log.Info("Function manager created", "runtime-factories", loadedRuntimeFact, - "tube-factories", loadedTubeFact) - return &functionManagerImpl{ - options: options, - functions: make(map[common.NamespacedName][]api.FunctionInstance), - log: log, - }, nil -} - -func (fm *functionManagerImpl) getTubeFactory(tubeConfig *model.TubeConfig) (contube.TubeFactory, error) { - factory, exist := fm.options.tubeFactoryMap[tubeConfig.Type] - if !exist { - return nil, fmt.Errorf("failed to get tube factory: %w, type: %s", common.ErrorTubeFactoryNotFound, tubeConfig.Type) - } - return factory, nil -} - -func (fm *functionManagerImpl) getRuntimeFactory(t string) (api.FunctionRuntimeFactory, error) { - factory, exist := fm.options.runtimeFactoryMap[t] - if !exist { - return nil, fmt.Errorf("failed to get runtime factory: %w, type: %s", common.ErrorRuntimeFactoryNotFound, t) - } - return factory, nil -} - -func generateRuntimeConfig(ctx context.Context, p api.Package, f *model.Function) (*model.RuntimeConfig, error) { - log := common.GetLogger(ctx) - rc := &model.RuntimeConfig{} - if p == _package.EmptyPackage { - return &f.Runtime, nil - } - supportedRuntimeConf := p.GetSupportedRuntimeConfig() - rcMap := map[string]*model.RuntimeConfig{} - for k, v := range supportedRuntimeConf { - if 
v.Type == "" { - log.Warn("Package supported runtime type is empty. Ignore it.", "index", k, "package", f.Package) - continue - } - vCopy := v - rcMap[v.Type] = &vCopy - } - if len(rcMap) == 0 { - return nil, common.ErrorPackageNoSupportedRuntime - } - defaultRC := &supportedRuntimeConf[0] - if f.Runtime.Type == "" { - rc.Type = defaultRC.Type - } else { - if r, exist := rcMap[f.Runtime.Type]; exist { - defaultRC = r - } else { - return nil, fmt.Errorf("runtime type '%s' is not supported by package '%s'", f.Runtime.Type, f.Package) - } - rc.Type = f.Runtime.Type - } - rc.Config = config.MergeConfig(defaultRC.Config, f.Runtime.Config) - return rc, nil -} - -func (fm *functionManagerImpl) StartFunction(f *model.Function) error { // TODO: Shouldn't use pointer here - if err := f.Validate(); err != nil { - return err - } - fm.functionsLock.Lock() - if _, exist := fm.functions[common.GetNamespacedName(f.Namespace, f.Name)]; exist { - fm.functionsLock.Unlock() - return common.ErrorFunctionExists - } - fm.functions[common.GetNamespacedName(f.Namespace, f.Name)] = make([]api.FunctionInstance, f.Replicas) - fm.functionsLock.Unlock() - - for i := int32(0); i < f.Replicas; i++ { - p, err := fm.options.packageLoader.Load(f.Package) - if err != nil { - return err - } - runtimeConfig, err := generateRuntimeConfig(context.Background(), p, f) - if err != nil { - return fmt.Errorf("failed to generate runtime config: %v", err) - } - - store, err := fm.options.stateStoreFactory.NewStateStore(f) - if err != nil { - return fmt.Errorf("failed to create state store: %w", err) - } - - funcCtx := newFuncCtxImpl(f, store) - instanceLogger := fm.log.SubLogger("functionName", f.Name, "instanceIndex", int(i), "runtimeType", runtimeConfig.Type) - instance := fm.options.instanceFactory.NewFunctionInstance(f, funcCtx, i, instanceLogger) - fm.functionsLock.Lock() - fm.functions[common.GetNamespacedName(f.Namespace, f.Name)][i] = instance - fm.functionsLock.Unlock() - runtimeFactory, err := 
fm.getRuntimeFactory(runtimeConfig.Type) - if err != nil { - return err - } - var sources []<-chan contube.Record - for _, t := range f.Sources { - sourceFactory, err := fm.getTubeFactory(&t) - if err != nil { - return err - } - sourceChan, err := sourceFactory.NewSourceTube(instance.Context(), t.Config) - if err != nil { - return fmt.Errorf("failed to create source event queue: %w", err) - } - sources = append(sources, sourceChan) - } - sinkFactory, err := fm.getTubeFactory(&f.Sink) - if err != nil { - return err - } - sink, err := sinkFactory.NewSinkTube(instance.Context(), f.Sink.Config) - if err != nil { - return fmt.Errorf("failed to create sink event queue: %w", err) - } - funcCtx.setSink(sink) - - runtime, err := runtimeFactory.NewFunctionRuntime(instance, runtimeConfig) - if err != nil { - return fmt.Errorf("failed to create runtime: %w", err) - } - fm.log.Info("Starting function instance", "function", f) - - go instance.Run(runtime, sources, sink) - } - return nil -} - -func (fm *functionManagerImpl) DeleteFunction(namespace, name string) error { - fm.functionsLock.Lock() - defer fm.functionsLock.Unlock() - instances, exist := fm.functions[common.GetNamespacedName(namespace, name)] - if !exist { - return common.ErrorFunctionNotFound - } - delete(fm.functions, common.GetNamespacedName(namespace, name)) - for _, instance := range instances { - instance.Stop() - } - return nil -} - -func (fm *functionManagerImpl) ListFunctions() (result []string) { - fm.functionsLock.Lock() - defer fm.functionsLock.Unlock() - result = make([]string, len(fm.functions)) - i := 0 - for k := range fm.functions { - result[i] = k.String() - i++ - } - return -} - -func (fm *functionManagerImpl) ProduceEvent(name string, event contube.Record) error { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - factory, ok := fm.options.tubeFactoryMap[common.MemoryTubeType] - if !ok { - return errors.New("memory tube factory not found") - } - c, err := 
factory.NewSinkTube(ctx, (&contube.SinkQueueConfig{Topic: name}).ToConfigMap()) - if err != nil { - return err - } - c <- event - return nil -} - -func (fm *functionManagerImpl) ConsumeEvent(name string) (contube.Record, error) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - factory, ok := fm.options.tubeFactoryMap[common.MemoryTubeType] - if !ok { - return nil, errors.New("memory tube factory not found") - } - c, err := factory.NewSourceTube(ctx, (&contube.SourceQueueConfig{ - Topics: []string{name}, SubName: "consume-" + strconv.Itoa(rand.Int())}).ToConfigMap()) - if err != nil { - return nil, err - } - return <-c, nil -} - -// GetStateStore returns the state store used by the function manager -// Return nil if no state store is configured -func (fm *functionManagerImpl) GetStateStore() (api.StateStore, error) { - return fm.options.stateStoreFactory.NewStateStore(nil) -} - -func (fm *functionManagerImpl) Close() error { - fm.functionsLock.Lock() - defer fm.functionsLock.Unlock() - log := common.NewDefaultLogger() - for _, instances := range fm.functions { - for _, instance := range instances { - instance.Stop() - } - } - if fm.options.stateStoreFactory != nil { - if err := fm.options.stateStoreFactory.Close(); err != nil { - log.Error(err, "failed to close state store") - } - } - return nil -} diff --git a/fs/manager_test.go b/fs/manager_test.go deleted file mode 100644 index 6f0619d6..00000000 --- a/fs/manager_test.go +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package fs - -import ( - "context" - "testing" - - "github.com/functionstream/function-stream/common" - "github.com/functionstream/function-stream/common/model" - "github.com/stretchr/testify/assert" -) - -// Mock implementations of the interfaces and structs -type MockPackage struct { - runtimeConfigs []model.RuntimeConfig -} - -func (m *MockPackage) GetSupportedRuntimeConfig() []model.RuntimeConfig { - return m.runtimeConfigs -} - -func TestGenerateRuntimeConfig_EmptySupportedRuntimeConfig(t *testing.T) { - ctx := context.Background() - p := &MockPackage{runtimeConfigs: []model.RuntimeConfig{}} - f := &model.Function{} - - _, err := generateRuntimeConfig(ctx, p, f) - assert.NotNil(t, err) - assert.Equal(t, common.ErrorPackageNoSupportedRuntime, err) -} - -func TestGenerateRuntimeConfig_EmptyFunctionRuntimeType(t *testing.T) { - ctx := context.Background() - p := &MockPackage{ - runtimeConfigs: []model.RuntimeConfig{ - {Type: "runtime1", Config: map[string]interface{}{"key1": "value1"}}, - }, - } - f := &model.Function{ - Runtime: model.RuntimeConfig{}, - } - - rc, err := generateRuntimeConfig(ctx, p, f) - assert.Nil(t, err) - assert.Equal(t, "runtime1", rc.Type) - assert.Equal(t, "value1", rc.Config["key1"]) -} - -func TestGenerateRuntimeConfig_UnsupportedFunctionRuntimeType(t *testing.T) { - ctx := context.Background() - p := &MockPackage{ - runtimeConfigs: []model.RuntimeConfig{ - {Type: "runtime1", Config: map[string]interface{}{"key1": "value1"}}, - }, - } - f := &model.Function{ - Runtime: model.RuntimeConfig{Type: 
"unsupported_runtime"}, - } - - _, err := generateRuntimeConfig(ctx, p, f) - assert.NotNil(t, err) - assert.Equal(t, "runtime type 'unsupported_runtime' is not supported by package ''", err.Error()) -} - -func TestGenerateRuntimeConfig_SupportedFunctionRuntimeType(t *testing.T) { - ctx := context.Background() - p := &MockPackage{ - runtimeConfigs: []model.RuntimeConfig{ - {Type: "runtime1", Config: map[string]interface{}{"key1": "value1"}}, - {Type: "runtime2", Config: map[string]interface{}{"key2": "value2"}}, - }, - } - f := &model.Function{ - Runtime: model.RuntimeConfig{Type: "runtime2", Config: map[string]interface{}{"key3": "value3"}}, - } - - rc, err := generateRuntimeConfig(ctx, p, f) - assert.Nil(t, err) - assert.Equal(t, "runtime2", rc.Type) - assert.Equal(t, "value2", rc.Config["key2"]) - assert.Equal(t, "value3", rc.Config["key3"]) -} diff --git a/fs/package/package_loader.go b/fs/package/package_loader.go deleted file mode 100644 index 41213531..00000000 --- a/fs/package/package_loader.go +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package _package - -import ( - "github.com/functionstream/function-stream/common" - "github.com/functionstream/function-stream/common/model" - "github.com/functionstream/function-stream/fs/api" -) - -type WasmPackage struct { - api.Package - path string -} - -type emptyPackage struct{} - -func (p *emptyPackage) GetSupportedRuntimeConfig() []model.RuntimeConfig { - return nil -} - -var EmptyPackage = &emptyPackage{} - -func (p *WasmPackage) GetSupportedRuntimeConfig() []model.RuntimeConfig { - return []model.RuntimeConfig{ - { - Type: common.WASMRuntime, - Config: map[string]interface{}{ - "archive": p.path, - }, - }, - } -} - -type DefaultPackageLoader struct { -} - -func (p DefaultPackageLoader) Load(path string) (api.Package, error) { - if path == "" { - return EmptyPackage, nil - } - return &WasmPackage{path: path}, nil -} - -func NewDefaultPackageLoader() api.PackageLoader { - return &DefaultPackageLoader{} -} diff --git a/fs/runtime/external/model/fs.pb.go b/fs/runtime/external/model/fs.pb.go deleted file mode 100644 index fe093f78..00000000 --- a/fs/runtime/external/model/fs.pb.go +++ /dev/null @@ -1,1286 +0,0 @@ -// -// Copyright 2024 Function Stream Org. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// versions: -// protoc-gen-go v1.34.2 -// protoc v4.25.2 -// source: fs/runtime/external/model/fs.proto - -package model - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type RegisterSchemaRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Schema string `protobuf:"bytes,1,opt,name=schema,proto3" json:"schema,omitempty"` -} - -func (x *RegisterSchemaRequest) Reset() { - *x = RegisterSchemaRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_fs_runtime_external_model_fs_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RegisterSchemaRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RegisterSchemaRequest) ProtoMessage() {} - -func (x *RegisterSchemaRequest) ProtoReflect() protoreflect.Message { - mi := &file_fs_runtime_external_model_fs_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RegisterSchemaRequest.ProtoReflect.Descriptor instead. 
-func (*RegisterSchemaRequest) Descriptor() ([]byte, []int) { - return file_fs_runtime_external_model_fs_proto_rawDescGZIP(), []int{0} -} - -func (x *RegisterSchemaRequest) GetSchema() string { - if x != nil { - return x.Schema - } - return "" -} - -type RegisterSchemaResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *RegisterSchemaResponse) Reset() { - *x = RegisterSchemaResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_fs_runtime_external_model_fs_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RegisterSchemaResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RegisterSchemaResponse) ProtoMessage() {} - -func (x *RegisterSchemaResponse) ProtoReflect() protoreflect.Message { - mi := &file_fs_runtime_external_model_fs_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RegisterSchemaResponse.ProtoReflect.Descriptor instead. 
-func (*RegisterSchemaResponse) Descriptor() ([]byte, []int) { - return file_fs_runtime_external_model_fs_proto_rawDescGZIP(), []int{1} -} - -type ReadRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *ReadRequest) Reset() { - *x = ReadRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_fs_runtime_external_model_fs_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ReadRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ReadRequest) ProtoMessage() {} - -func (x *ReadRequest) ProtoReflect() protoreflect.Message { - mi := &file_fs_runtime_external_model_fs_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ReadRequest.ProtoReflect.Descriptor instead. 
-func (*ReadRequest) Descriptor() ([]byte, []int) { - return file_fs_runtime_external_model_fs_proto_rawDescGZIP(), []int{2} -} - -type Event struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` - Payload []byte `protobuf:"bytes,2,opt,name=payload,proto3" json:"payload,omitempty"` -} - -func (x *Event) Reset() { - *x = Event{} - if protoimpl.UnsafeEnabled { - mi := &file_fs_runtime_external_model_fs_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Event) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Event) ProtoMessage() {} - -func (x *Event) ProtoReflect() protoreflect.Message { - mi := &file_fs_runtime_external_model_fs_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Event.ProtoReflect.Descriptor instead. 
-func (*Event) Descriptor() ([]byte, []int) { - return file_fs_runtime_external_model_fs_proto_rawDescGZIP(), []int{3} -} - -func (x *Event) GetId() int64 { - if x != nil { - return x.Id - } - return 0 -} - -func (x *Event) GetPayload() []byte { - if x != nil { - return x.Payload - } - return nil -} - -type WriteResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *WriteResponse) Reset() { - *x = WriteResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_fs_runtime_external_model_fs_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *WriteResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*WriteResponse) ProtoMessage() {} - -func (x *WriteResponse) ProtoReflect() protoreflect.Message { - mi := &file_fs_runtime_external_model_fs_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use WriteResponse.ProtoReflect.Descriptor instead. 
-func (*WriteResponse) Descriptor() ([]byte, []int) { - return file_fs_runtime_external_model_fs_proto_rawDescGZIP(), []int{4} -} - -type StateContext struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *StateContext) Reset() { - *x = StateContext{} - if protoimpl.UnsafeEnabled { - mi := &file_fs_runtime_external_model_fs_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StateContext) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StateContext) ProtoMessage() {} - -func (x *StateContext) ProtoReflect() protoreflect.Message { - mi := &file_fs_runtime_external_model_fs_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StateContext.ProtoReflect.Descriptor instead. 
-func (*StateContext) Descriptor() ([]byte, []int) { - return file_fs_runtime_external_model_fs_proto_rawDescGZIP(), []int{5} -} - -type GetStateRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Context *StateContext `protobuf:"bytes,1,opt,name=context,proto3" json:"context,omitempty"` - Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` -} - -func (x *GetStateRequest) Reset() { - *x = GetStateRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_fs_runtime_external_model_fs_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetStateRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetStateRequest) ProtoMessage() {} - -func (x *GetStateRequest) ProtoReflect() protoreflect.Message { - mi := &file_fs_runtime_external_model_fs_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetStateRequest.ProtoReflect.Descriptor instead. 
-func (*GetStateRequest) Descriptor() ([]byte, []int) { - return file_fs_runtime_external_model_fs_proto_rawDescGZIP(), []int{6} -} - -func (x *GetStateRequest) GetContext() *StateContext { - if x != nil { - return x.Context - } - return nil -} - -func (x *GetStateRequest) GetKey() string { - if x != nil { - return x.Key - } - return "" -} - -type GetStateResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *GetStateResponse) Reset() { - *x = GetStateResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_fs_runtime_external_model_fs_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetStateResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetStateResponse) ProtoMessage() {} - -func (x *GetStateResponse) ProtoReflect() protoreflect.Message { - mi := &file_fs_runtime_external_model_fs_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetStateResponse.ProtoReflect.Descriptor instead. 
-func (*GetStateResponse) Descriptor() ([]byte, []int) { - return file_fs_runtime_external_model_fs_proto_rawDescGZIP(), []int{7} -} - -func (x *GetStateResponse) GetValue() []byte { - if x != nil { - return x.Value - } - return nil -} - -type PutStateRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Context *StateContext `protobuf:"bytes,1,opt,name=context,proto3" json:"context,omitempty"` - Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` - Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *PutStateRequest) Reset() { - *x = PutStateRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_fs_runtime_external_model_fs_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PutStateRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PutStateRequest) ProtoMessage() {} - -func (x *PutStateRequest) ProtoReflect() protoreflect.Message { - mi := &file_fs_runtime_external_model_fs_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PutStateRequest.ProtoReflect.Descriptor instead. 
-func (*PutStateRequest) Descriptor() ([]byte, []int) { - return file_fs_runtime_external_model_fs_proto_rawDescGZIP(), []int{8} -} - -func (x *PutStateRequest) GetContext() *StateContext { - if x != nil { - return x.Context - } - return nil -} - -func (x *PutStateRequest) GetKey() string { - if x != nil { - return x.Key - } - return "" -} - -func (x *PutStateRequest) GetValue() []byte { - if x != nil { - return x.Value - } - return nil -} - -type PutStateResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *PutStateResponse) Reset() { - *x = PutStateResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_fs_runtime_external_model_fs_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PutStateResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PutStateResponse) ProtoMessage() {} - -func (x *PutStateResponse) ProtoReflect() protoreflect.Message { - mi := &file_fs_runtime_external_model_fs_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PutStateResponse.ProtoReflect.Descriptor instead. 
-func (*PutStateResponse) Descriptor() ([]byte, []int) { - return file_fs_runtime_external_model_fs_proto_rawDescGZIP(), []int{9} -} - -type ListStatesRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Context *StateContext `protobuf:"bytes,1,opt,name=context,proto3" json:"context,omitempty"` - StartInclusive string `protobuf:"bytes,2,opt,name=start_inclusive,json=startInclusive,proto3" json:"start_inclusive,omitempty"` - EndExclusive string `protobuf:"bytes,3,opt,name=end_exclusive,json=endExclusive,proto3" json:"end_exclusive,omitempty"` -} - -func (x *ListStatesRequest) Reset() { - *x = ListStatesRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_fs_runtime_external_model_fs_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListStatesRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListStatesRequest) ProtoMessage() {} - -func (x *ListStatesRequest) ProtoReflect() protoreflect.Message { - mi := &file_fs_runtime_external_model_fs_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListStatesRequest.ProtoReflect.Descriptor instead. 
-func (*ListStatesRequest) Descriptor() ([]byte, []int) { - return file_fs_runtime_external_model_fs_proto_rawDescGZIP(), []int{10} -} - -func (x *ListStatesRequest) GetContext() *StateContext { - if x != nil { - return x.Context - } - return nil -} - -func (x *ListStatesRequest) GetStartInclusive() string { - if x != nil { - return x.StartInclusive - } - return "" -} - -func (x *ListStatesRequest) GetEndExclusive() string { - if x != nil { - return x.EndExclusive - } - return "" -} - -type ListStatesResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Keys []string `protobuf:"bytes,1,rep,name=keys,proto3" json:"keys,omitempty"` -} - -func (x *ListStatesResponse) Reset() { - *x = ListStatesResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_fs_runtime_external_model_fs_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListStatesResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListStatesResponse) ProtoMessage() {} - -func (x *ListStatesResponse) ProtoReflect() protoreflect.Message { - mi := &file_fs_runtime_external_model_fs_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListStatesResponse.ProtoReflect.Descriptor instead. 
-func (*ListStatesResponse) Descriptor() ([]byte, []int) { - return file_fs_runtime_external_model_fs_proto_rawDescGZIP(), []int{11} -} - -func (x *ListStatesResponse) GetKeys() []string { - if x != nil { - return x.Keys - } - return nil -} - -type DeleteStateRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Context *StateContext `protobuf:"bytes,1,opt,name=context,proto3" json:"context,omitempty"` - Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` -} - -func (x *DeleteStateRequest) Reset() { - *x = DeleteStateRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_fs_runtime_external_model_fs_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DeleteStateRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeleteStateRequest) ProtoMessage() {} - -func (x *DeleteStateRequest) ProtoReflect() protoreflect.Message { - mi := &file_fs_runtime_external_model_fs_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeleteStateRequest.ProtoReflect.Descriptor instead. 
-func (*DeleteStateRequest) Descriptor() ([]byte, []int) { - return file_fs_runtime_external_model_fs_proto_rawDescGZIP(), []int{12} -} - -func (x *DeleteStateRequest) GetContext() *StateContext { - if x != nil { - return x.Context - } - return nil -} - -func (x *DeleteStateRequest) GetKey() string { - if x != nil { - return x.Key - } - return "" -} - -type DeleteStateResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *DeleteStateResponse) Reset() { - *x = DeleteStateResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_fs_runtime_external_model_fs_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DeleteStateResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeleteStateResponse) ProtoMessage() {} - -func (x *DeleteStateResponse) ProtoReflect() protoreflect.Message { - mi := &file_fs_runtime_external_model_fs_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeleteStateResponse.ProtoReflect.Descriptor instead. 
-func (*DeleteStateResponse) Descriptor() ([]byte, []int) { - return file_fs_runtime_external_model_fs_proto_rawDescGZIP(), []int{13} -} - -type GetConfigRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *GetConfigRequest) Reset() { - *x = GetConfigRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_fs_runtime_external_model_fs_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetConfigRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetConfigRequest) ProtoMessage() {} - -func (x *GetConfigRequest) ProtoReflect() protoreflect.Message { - mi := &file_fs_runtime_external_model_fs_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetConfigRequest.ProtoReflect.Descriptor instead. 
-func (*GetConfigRequest) Descriptor() ([]byte, []int) { - return file_fs_runtime_external_model_fs_proto_rawDescGZIP(), []int{14} -} - -type GetConfigResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Config map[string]string `protobuf:"bytes,1,rep,name=config,proto3" json:"config,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (x *GetConfigResponse) Reset() { - *x = GetConfigResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_fs_runtime_external_model_fs_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetConfigResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetConfigResponse) ProtoMessage() {} - -func (x *GetConfigResponse) ProtoReflect() protoreflect.Message { - mi := &file_fs_runtime_external_model_fs_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetConfigResponse.ProtoReflect.Descriptor instead. 
-func (*GetConfigResponse) Descriptor() ([]byte, []int) { - return file_fs_runtime_external_model_fs_proto_rawDescGZIP(), []int{15} -} - -func (x *GetConfigResponse) GetConfig() map[string]string { - if x != nil { - return x.Config - } - return nil -} - -type AckRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` -} - -func (x *AckRequest) Reset() { - *x = AckRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_fs_runtime_external_model_fs_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AckRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AckRequest) ProtoMessage() {} - -func (x *AckRequest) ProtoReflect() protoreflect.Message { - mi := &file_fs_runtime_external_model_fs_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AckRequest.ProtoReflect.Descriptor instead. 
-func (*AckRequest) Descriptor() ([]byte, []int) { - return file_fs_runtime_external_model_fs_proto_rawDescGZIP(), []int{16} -} - -func (x *AckRequest) GetId() int64 { - if x != nil { - return x.Id - } - return 0 -} - -type AckResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *AckResponse) Reset() { - *x = AckResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_fs_runtime_external_model_fs_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AckResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AckResponse) ProtoMessage() {} - -func (x *AckResponse) ProtoReflect() protoreflect.Message { - mi := &file_fs_runtime_external_model_fs_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AckResponse.ProtoReflect.Descriptor instead. 
-func (*AckResponse) Descriptor() ([]byte, []int) { - return file_fs_runtime_external_model_fs_proto_rawDescGZIP(), []int{17} -} - -var File_fs_runtime_external_model_fs_proto protoreflect.FileDescriptor - -var file_fs_runtime_external_model_fs_proto_rawDesc = []byte{ - 0x0a, 0x22, 0x66, 0x73, 0x2f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2f, 0x65, 0x78, 0x74, - 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f, 0x66, 0x73, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x66, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, - 0x6c, 0x22, 0x2f, 0x0a, 0x15, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x22, 0x18, 0x0a, 0x16, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x0d, 0x0a, 0x0b, - 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x31, 0x0a, 0x05, 0x45, - 0x76, 0x65, 0x6e, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x02, 0x69, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x0f, - 0x0a, 0x0d, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x0e, 0x0a, 0x0c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, - 0x58, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x33, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x66, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, - 0x6c, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 
0x74, 0x65, 0x78, 0x74, 0x52, 0x07, - 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x28, 0x0a, 0x10, 0x47, 0x65, 0x74, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x22, 0x6e, 0x0a, 0x0f, 0x50, 0x75, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x33, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, - 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x66, 0x73, 0x5f, 0x65, 0x78, 0x74, - 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, - 0x78, 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, - 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x22, 0x12, 0x0a, 0x10, 0x50, 0x75, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x96, 0x01, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x33, 0x0a, - 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, - 0x2e, 0x66, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, - 0x78, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x69, 0x6e, 0x63, 0x6c, - 0x75, 0x73, 0x69, 0x76, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x49, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x65, - 
0x6e, 0x64, 0x5f, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0c, 0x65, 0x6e, 0x64, 0x45, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, - 0x22, 0x28, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x22, 0x5b, 0x0a, 0x12, 0x44, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x33, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x19, 0x2e, 0x66, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x52, 0x07, 0x63, 0x6f, - 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x15, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, - 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x12, - 0x0a, 0x10, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x22, 0x92, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x42, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x66, 0x73, 0x5f, 0x65, 0x78, - 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x39, 0x0a, 0x0b, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, - 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 
0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x1c, 0x0a, 0x0a, 0x41, 0x63, 0x6b, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x02, 0x69, 0x64, 0x22, 0x0d, 0x0a, 0x0b, 0x41, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x8d, 0x05, 0x0a, 0x08, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x59, 0x0a, 0x0e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x12, 0x22, 0x2e, 0x66, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, - 0x6c, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x66, 0x73, 0x5f, 0x65, 0x78, 0x74, - 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x04, - 0x52, 0x65, 0x61, 0x64, 0x12, 0x18, 0x2e, 0x66, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, - 0x61, 0x6c, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, - 0x2e, 0x66, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x45, 0x76, 0x65, - 0x6e, 0x74, 0x12, 0x37, 0x0a, 0x05, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x12, 0x2e, 0x66, 0x73, - 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x1a, - 0x1a, 0x2e, 0x66, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x57, 0x72, - 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x03, 0x41, - 0x63, 0x6b, 0x12, 0x17, 0x2e, 0x66, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, - 0x2e, 0x41, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 
0x18, 0x2e, 0x66, 0x73, - 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x41, 0x63, 0x6b, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x47, 0x0a, 0x08, 0x50, 0x75, 0x74, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x12, 0x1c, 0x2e, 0x66, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, - 0x50, 0x75, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x1d, 0x2e, 0x66, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x50, 0x75, - 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x47, - 0x0a, 0x08, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1c, 0x2e, 0x66, 0x73, 0x5f, - 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x66, 0x73, 0x5f, 0x65, 0x78, - 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x0a, 0x4c, 0x69, 0x73, 0x74, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x73, 0x12, 0x1e, 0x2e, 0x66, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, - 0x6e, 0x61, 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x66, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, - 0x6e, 0x61, 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, 0x0b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1f, 0x2e, 0x66, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, - 0x6e, 0x61, 0x6c, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x66, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, - 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x52, 0x65, 
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1d, 0x2e, 0x66, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, - 0x6e, 0x61, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x66, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, - 0x61, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x1b, 0x5a, 0x19, 0x66, 0x73, 0x2f, 0x72, 0x75, 0x6e, 0x74, 0x69, - 0x6d, 0x65, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x6d, 0x6f, 0x64, 0x65, - 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_fs_runtime_external_model_fs_proto_rawDescOnce sync.Once - file_fs_runtime_external_model_fs_proto_rawDescData = file_fs_runtime_external_model_fs_proto_rawDesc -) - -func file_fs_runtime_external_model_fs_proto_rawDescGZIP() []byte { - file_fs_runtime_external_model_fs_proto_rawDescOnce.Do(func() { - file_fs_runtime_external_model_fs_proto_rawDescData = protoimpl.X.CompressGZIP(file_fs_runtime_external_model_fs_proto_rawDescData) - }) - return file_fs_runtime_external_model_fs_proto_rawDescData -} - -var file_fs_runtime_external_model_fs_proto_msgTypes = make([]protoimpl.MessageInfo, 19) -var file_fs_runtime_external_model_fs_proto_goTypes = []any{ - (*RegisterSchemaRequest)(nil), // 0: fs_external.RegisterSchemaRequest - (*RegisterSchemaResponse)(nil), // 1: fs_external.RegisterSchemaResponse - (*ReadRequest)(nil), // 2: fs_external.ReadRequest - (*Event)(nil), // 3: fs_external.Event - (*WriteResponse)(nil), // 4: fs_external.WriteResponse - (*StateContext)(nil), // 5: fs_external.StateContext - (*GetStateRequest)(nil), // 6: fs_external.GetStateRequest - (*GetStateResponse)(nil), // 7: fs_external.GetStateResponse - (*PutStateRequest)(nil), // 8: fs_external.PutStateRequest - (*PutStateResponse)(nil), // 9: 
fs_external.PutStateResponse - (*ListStatesRequest)(nil), // 10: fs_external.ListStatesRequest - (*ListStatesResponse)(nil), // 11: fs_external.ListStatesResponse - (*DeleteStateRequest)(nil), // 12: fs_external.DeleteStateRequest - (*DeleteStateResponse)(nil), // 13: fs_external.DeleteStateResponse - (*GetConfigRequest)(nil), // 14: fs_external.GetConfigRequest - (*GetConfigResponse)(nil), // 15: fs_external.GetConfigResponse - (*AckRequest)(nil), // 16: fs_external.AckRequest - (*AckResponse)(nil), // 17: fs_external.AckResponse - nil, // 18: fs_external.GetConfigResponse.ConfigEntry -} -var file_fs_runtime_external_model_fs_proto_depIdxs = []int32{ - 5, // 0: fs_external.GetStateRequest.context:type_name -> fs_external.StateContext - 5, // 1: fs_external.PutStateRequest.context:type_name -> fs_external.StateContext - 5, // 2: fs_external.ListStatesRequest.context:type_name -> fs_external.StateContext - 5, // 3: fs_external.DeleteStateRequest.context:type_name -> fs_external.StateContext - 18, // 4: fs_external.GetConfigResponse.config:type_name -> fs_external.GetConfigResponse.ConfigEntry - 0, // 5: fs_external.Function.RegisterSchema:input_type -> fs_external.RegisterSchemaRequest - 2, // 6: fs_external.Function.Read:input_type -> fs_external.ReadRequest - 3, // 7: fs_external.Function.Write:input_type -> fs_external.Event - 16, // 8: fs_external.Function.Ack:input_type -> fs_external.AckRequest - 8, // 9: fs_external.Function.PutState:input_type -> fs_external.PutStateRequest - 6, // 10: fs_external.Function.GetState:input_type -> fs_external.GetStateRequest - 10, // 11: fs_external.Function.ListStates:input_type -> fs_external.ListStatesRequest - 12, // 12: fs_external.Function.DeleteState:input_type -> fs_external.DeleteStateRequest - 14, // 13: fs_external.Function.GetConfig:input_type -> fs_external.GetConfigRequest - 1, // 14: fs_external.Function.RegisterSchema:output_type -> fs_external.RegisterSchemaResponse - 3, // 15: 
fs_external.Function.Read:output_type -> fs_external.Event - 4, // 16: fs_external.Function.Write:output_type -> fs_external.WriteResponse - 17, // 17: fs_external.Function.Ack:output_type -> fs_external.AckResponse - 9, // 18: fs_external.Function.PutState:output_type -> fs_external.PutStateResponse - 7, // 19: fs_external.Function.GetState:output_type -> fs_external.GetStateResponse - 11, // 20: fs_external.Function.ListStates:output_type -> fs_external.ListStatesResponse - 13, // 21: fs_external.Function.DeleteState:output_type -> fs_external.DeleteStateResponse - 15, // 22: fs_external.Function.GetConfig:output_type -> fs_external.GetConfigResponse - 14, // [14:23] is the sub-list for method output_type - 5, // [5:14] is the sub-list for method input_type - 5, // [5:5] is the sub-list for extension type_name - 5, // [5:5] is the sub-list for extension extendee - 0, // [0:5] is the sub-list for field type_name -} - -func init() { file_fs_runtime_external_model_fs_proto_init() } -func file_fs_runtime_external_model_fs_proto_init() { - if File_fs_runtime_external_model_fs_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_fs_runtime_external_model_fs_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*RegisterSchemaRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_fs_runtime_external_model_fs_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*RegisterSchemaResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_fs_runtime_external_model_fs_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*ReadRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_fs_runtime_external_model_fs_proto_msgTypes[3].Exporter = 
func(v any, i int) any { - switch v := v.(*Event); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_fs_runtime_external_model_fs_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*WriteResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_fs_runtime_external_model_fs_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*StateContext); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_fs_runtime_external_model_fs_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*GetStateRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_fs_runtime_external_model_fs_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*GetStateResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_fs_runtime_external_model_fs_proto_msgTypes[8].Exporter = func(v any, i int) any { - switch v := v.(*PutStateRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_fs_runtime_external_model_fs_proto_msgTypes[9].Exporter = func(v any, i int) any { - switch v := v.(*PutStateResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_fs_runtime_external_model_fs_proto_msgTypes[10].Exporter = func(v any, i int) any { - switch v := v.(*ListStatesRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_fs_runtime_external_model_fs_proto_msgTypes[11].Exporter = func(v any, i int) any { - switch v := v.(*ListStatesResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_fs_runtime_external_model_fs_proto_msgTypes[12].Exporter = func(v any, i int) any { - switch v := v.(*DeleteStateRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_fs_runtime_external_model_fs_proto_msgTypes[13].Exporter = func(v any, i int) any { - switch v := v.(*DeleteStateResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_fs_runtime_external_model_fs_proto_msgTypes[14].Exporter = func(v any, i int) any { - switch v := v.(*GetConfigRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_fs_runtime_external_model_fs_proto_msgTypes[15].Exporter = func(v any, i int) any { - switch v := v.(*GetConfigResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_fs_runtime_external_model_fs_proto_msgTypes[16].Exporter = func(v any, i int) any { - switch v := v.(*AckRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_fs_runtime_external_model_fs_proto_msgTypes[17].Exporter = func(v any, i int) any { - switch v := v.(*AckResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: 
file_fs_runtime_external_model_fs_proto_rawDesc, - NumEnums: 0, - NumMessages: 19, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_fs_runtime_external_model_fs_proto_goTypes, - DependencyIndexes: file_fs_runtime_external_model_fs_proto_depIdxs, - MessageInfos: file_fs_runtime_external_model_fs_proto_msgTypes, - }.Build() - File_fs_runtime_external_model_fs_proto = out.File - file_fs_runtime_external_model_fs_proto_rawDesc = nil - file_fs_runtime_external_model_fs_proto_goTypes = nil - file_fs_runtime_external_model_fs_proto_depIdxs = nil -} diff --git a/fs/runtime/external/model/fs.proto b/fs/runtime/external/model/fs.proto deleted file mode 100644 index 1c97779f..00000000 --- a/fs/runtime/external/model/fs.proto +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -syntax = "proto3"; -option go_package = "fs/runtime/external/model"; -package fs_external; - -message RegisterSchemaRequest { - string schema = 1; -} - -message RegisterSchemaResponse { -} - -message ReadRequest { - -} - -message Event { - int64 id = 1; - bytes payload = 2; -} - -message WriteResponse { - -} - -message StateContext { - -} - -message GetStateRequest { - StateContext context = 1; - string key = 2; -} - -message GetStateResponse { - bytes value = 2; -} - -message PutStateRequest { - StateContext context = 1; - string key = 2; - bytes value = 3; -} - -message PutStateResponse { - -} - -message ListStatesRequest { - StateContext context = 1; - string start_inclusive = 2; - string end_exclusive = 3; -} - -message ListStatesResponse { - repeated string keys = 1; -} - -message DeleteStateRequest { - StateContext context = 1; - string key = 2; -} - -message DeleteStateResponse { - -} - -message GetConfigRequest { - -} - -message GetConfigResponse { - map config = 1; -} - -message AckRequest { - int64 id = 1; -} - -message AckResponse { - -} - -service Function { - rpc RegisterSchema(RegisterSchemaRequest) returns (RegisterSchemaResponse); - rpc Read(ReadRequest) returns (Event); - rpc Write(Event) returns (WriteResponse); - rpc Ack(AckRequest) returns (AckResponse); - rpc PutState(PutStateRequest) returns (PutStateResponse); - rpc GetState(GetStateRequest) returns (GetStateResponse); - rpc ListStates(ListStatesRequest) returns (ListStatesResponse); - rpc DeleteState(DeleteStateRequest) returns (DeleteStateResponse); - rpc GetConfig(GetConfigRequest) returns (GetConfigResponse); -} diff --git a/fs/runtime/external/model/fs_grpc.pb.go b/fs/runtime/external/model/fs_grpc.pb.go deleted file mode 100644 index 4064443f..00000000 --- a/fs/runtime/external/model/fs_grpc.pb.go +++ /dev/null @@ -1,389 +0,0 @@ -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
- -package model - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -// FunctionClient is the client API for Function service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type FunctionClient interface { - RegisterSchema(ctx context.Context, in *RegisterSchemaRequest, opts ...grpc.CallOption) (*RegisterSchemaResponse, error) - Read(ctx context.Context, in *ReadRequest, opts ...grpc.CallOption) (*Event, error) - Write(ctx context.Context, in *Event, opts ...grpc.CallOption) (*WriteResponse, error) - Ack(ctx context.Context, in *AckRequest, opts ...grpc.CallOption) (*AckResponse, error) - PutState(ctx context.Context, in *PutStateRequest, opts ...grpc.CallOption) (*PutStateResponse, error) - GetState(ctx context.Context, in *GetStateRequest, opts ...grpc.CallOption) (*GetStateResponse, error) - ListStates(ctx context.Context, in *ListStatesRequest, opts ...grpc.CallOption) (*ListStatesResponse, error) - DeleteState(ctx context.Context, in *DeleteStateRequest, opts ...grpc.CallOption) (*DeleteStateResponse, error) - GetConfig(ctx context.Context, in *GetConfigRequest, opts ...grpc.CallOption) (*GetConfigResponse, error) -} - -type functionClient struct { - cc grpc.ClientConnInterface -} - -func NewFunctionClient(cc grpc.ClientConnInterface) FunctionClient { - return &functionClient{cc} -} - -func (c *functionClient) RegisterSchema(ctx context.Context, in *RegisterSchemaRequest, opts ...grpc.CallOption) (*RegisterSchemaResponse, error) { - out := new(RegisterSchemaResponse) - err := c.cc.Invoke(ctx, "/fs_external.Function/RegisterSchema", 
in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *functionClient) Read(ctx context.Context, in *ReadRequest, opts ...grpc.CallOption) (*Event, error) { - out := new(Event) - err := c.cc.Invoke(ctx, "/fs_external.Function/Read", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *functionClient) Write(ctx context.Context, in *Event, opts ...grpc.CallOption) (*WriteResponse, error) { - out := new(WriteResponse) - err := c.cc.Invoke(ctx, "/fs_external.Function/Write", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *functionClient) Ack(ctx context.Context, in *AckRequest, opts ...grpc.CallOption) (*AckResponse, error) { - out := new(AckResponse) - err := c.cc.Invoke(ctx, "/fs_external.Function/Ack", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *functionClient) PutState(ctx context.Context, in *PutStateRequest, opts ...grpc.CallOption) (*PutStateResponse, error) { - out := new(PutStateResponse) - err := c.cc.Invoke(ctx, "/fs_external.Function/PutState", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *functionClient) GetState(ctx context.Context, in *GetStateRequest, opts ...grpc.CallOption) (*GetStateResponse, error) { - out := new(GetStateResponse) - err := c.cc.Invoke(ctx, "/fs_external.Function/GetState", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *functionClient) ListStates(ctx context.Context, in *ListStatesRequest, opts ...grpc.CallOption) (*ListStatesResponse, error) { - out := new(ListStatesResponse) - err := c.cc.Invoke(ctx, "/fs_external.Function/ListStates", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *functionClient) DeleteState(ctx context.Context, in *DeleteStateRequest, opts ...grpc.CallOption) (*DeleteStateResponse, error) { - out := new(DeleteStateResponse) - err := c.cc.Invoke(ctx, "/fs_external.Function/DeleteState", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *functionClient) GetConfig(ctx context.Context, in *GetConfigRequest, opts ...grpc.CallOption) (*GetConfigResponse, error) { - out := new(GetConfigResponse) - err := c.cc.Invoke(ctx, "/fs_external.Function/GetConfig", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// FunctionServer is the server API for Function service. -// All implementations must embed UnimplementedFunctionServer -// for forward compatibility -type FunctionServer interface { - RegisterSchema(context.Context, *RegisterSchemaRequest) (*RegisterSchemaResponse, error) - Read(context.Context, *ReadRequest) (*Event, error) - Write(context.Context, *Event) (*WriteResponse, error) - Ack(context.Context, *AckRequest) (*AckResponse, error) - PutState(context.Context, *PutStateRequest) (*PutStateResponse, error) - GetState(context.Context, *GetStateRequest) (*GetStateResponse, error) - ListStates(context.Context, *ListStatesRequest) (*ListStatesResponse, error) - DeleteState(context.Context, *DeleteStateRequest) (*DeleteStateResponse, error) - GetConfig(context.Context, *GetConfigRequest) (*GetConfigResponse, error) - mustEmbedUnimplementedFunctionServer() -} - -// UnimplementedFunctionServer must be embedded to have forward compatible implementations. 
-type UnimplementedFunctionServer struct { -} - -func (UnimplementedFunctionServer) RegisterSchema(context.Context, *RegisterSchemaRequest) (*RegisterSchemaResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method RegisterSchema not implemented") -} -func (UnimplementedFunctionServer) Read(context.Context, *ReadRequest) (*Event, error) { - return nil, status.Errorf(codes.Unimplemented, "method Read not implemented") -} -func (UnimplementedFunctionServer) Write(context.Context, *Event) (*WriteResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Write not implemented") -} -func (UnimplementedFunctionServer) Ack(context.Context, *AckRequest) (*AckResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Ack not implemented") -} -func (UnimplementedFunctionServer) PutState(context.Context, *PutStateRequest) (*PutStateResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method PutState not implemented") -} -func (UnimplementedFunctionServer) GetState(context.Context, *GetStateRequest) (*GetStateResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetState not implemented") -} -func (UnimplementedFunctionServer) ListStates(context.Context, *ListStatesRequest) (*ListStatesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListStates not implemented") -} -func (UnimplementedFunctionServer) DeleteState(context.Context, *DeleteStateRequest) (*DeleteStateResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteState not implemented") -} -func (UnimplementedFunctionServer) GetConfig(context.Context, *GetConfigRequest) (*GetConfigResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetConfig not implemented") -} -func (UnimplementedFunctionServer) mustEmbedUnimplementedFunctionServer() {} - -// UnsafeFunctionServer may be embedded to opt out of forward compatibility for this service. 
-// Use of this interface is not recommended, as added methods to FunctionServer will -// result in compilation errors. -type UnsafeFunctionServer interface { - mustEmbedUnimplementedFunctionServer() -} - -func RegisterFunctionServer(s grpc.ServiceRegistrar, srv FunctionServer) { - s.RegisterService(&Function_ServiceDesc, srv) -} - -func _Function_RegisterSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RegisterSchemaRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(FunctionServer).RegisterSchema(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/fs_external.Function/RegisterSchema", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(FunctionServer).RegisterSchema(ctx, req.(*RegisterSchemaRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Function_Read_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ReadRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(FunctionServer).Read(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/fs_external.Function/Read", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(FunctionServer).Read(ctx, req.(*ReadRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Function_Write_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Event) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(FunctionServer).Write(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: 
"/fs_external.Function/Write", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(FunctionServer).Write(ctx, req.(*Event)) - } - return interceptor(ctx, in, info, handler) -} - -func _Function_Ack_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AckRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(FunctionServer).Ack(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/fs_external.Function/Ack", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(FunctionServer).Ack(ctx, req.(*AckRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Function_PutState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PutStateRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(FunctionServer).PutState(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/fs_external.Function/PutState", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(FunctionServer).PutState(ctx, req.(*PutStateRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Function_GetState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetStateRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(FunctionServer).GetState(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/fs_external.Function/GetState", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(FunctionServer).GetState(ctx, 
req.(*GetStateRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Function_ListStates_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListStatesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(FunctionServer).ListStates(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/fs_external.Function/ListStates", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(FunctionServer).ListStates(ctx, req.(*ListStatesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Function_DeleteState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteStateRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(FunctionServer).DeleteState(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/fs_external.Function/DeleteState", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(FunctionServer).DeleteState(ctx, req.(*DeleteStateRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Function_GetConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetConfigRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(FunctionServer).GetConfig(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/fs_external.Function/GetConfig", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(FunctionServer).GetConfig(ctx, req.(*GetConfigRequest)) - } - return interceptor(ctx, in, info, handler) -} - 
-// Function_ServiceDesc is the grpc.ServiceDesc for Function service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var Function_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "fs_external.Function", - HandlerType: (*FunctionServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "RegisterSchema", - Handler: _Function_RegisterSchema_Handler, - }, - { - MethodName: "Read", - Handler: _Function_Read_Handler, - }, - { - MethodName: "Write", - Handler: _Function_Write_Handler, - }, - { - MethodName: "Ack", - Handler: _Function_Ack_Handler, - }, - { - MethodName: "PutState", - Handler: _Function_PutState_Handler, - }, - { - MethodName: "GetState", - Handler: _Function_GetState_Handler, - }, - { - MethodName: "ListStates", - Handler: _Function_ListStates_Handler, - }, - { - MethodName: "DeleteState", - Handler: _Function_DeleteState_Handler, - }, - { - MethodName: "GetConfig", - Handler: _Function_GetConfig_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "fs/runtime/external/model/fs.proto", -} diff --git a/fs/runtime/external/runtime.go b/fs/runtime/external/runtime.go deleted file mode 100644 index 9d7b4673..00000000 --- a/fs/runtime/external/runtime.go +++ /dev/null @@ -1,275 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package external - -import ( - "context" - "fmt" - "net" - "os" - "sync" - - "github.com/functionstream/function-stream/common/config" - funcModel "github.com/functionstream/function-stream/common/model" - - "github.com/functionstream/function-stream/common" - "github.com/functionstream/function-stream/fs/api" - "github.com/functionstream/function-stream/fs/contube" - "github.com/functionstream/function-stream/fs/runtime/external/model" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/health" - "google.golang.org/grpc/health/grpc_health_v1" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -type functionServerImpl struct { - model.FunctionServer - runtimeMaps sync.Map -} - -func (f *functionServerImpl) getFunctionRuntime(ctx context.Context) (*runtime, error) { - md, ok := metadata.FromIncomingContext(ctx) - if !ok { - return nil, fmt.Errorf("failed to get metadata") - } - if _, ok := md["name"]; !ok || len(md["name"]) == 0 { - return nil, fmt.Errorf("the metadata doesn't contain the function name") - } - name := md["name"][0] - r, ok := f.runtimeMaps.Load(name) - if !ok { - msg := fmt.Sprintf("function runtime %s not found", name) - return nil, status.Error(codes.Unavailable, msg) - } - return r.(*runtime), nil -} - -func (f *functionServerImpl) RegisterSchema(ctx context.Context, - request *model.RegisterSchemaRequest) (*model.RegisterSchemaResponse, error) { - r, err := f.getFunctionRuntime(ctx) - if err != nil { - return nil, err - } - r.log.Info("Registering schema", "schema", request.Schema) - return &model.RegisterSchemaResponse{}, nil -} - -func (f *functionServerImpl) Read(ctx context.Context, _ *model.ReadRequest) (*model.Event, error) { - r, err := f.getFunctionRuntime(ctx) - if err != nil { - return nil, err - } - return r.ReadRecord(ctx) -} - -func (f *functionServerImpl) Write(ctx context.Context, event *model.Event) (*model.WriteResponse, error) { - r, err := 
f.getFunctionRuntime(ctx) - if err != nil { - return nil, err - } - if err = r.funcCtx.Write(contube.NewRecordImpl(event.Payload, func() {})); err != nil { - return nil, err - } - return &model.WriteResponse{}, nil -} - -func (f *functionServerImpl) Ack(ctx context.Context, request *model.AckRequest) (*model.AckResponse, error) { - r, err := f.getFunctionRuntime(ctx) - if err != nil { - return nil, err - } - r.Ack(request.Id) - return &model.AckResponse{}, nil -} - -func (f *functionServerImpl) PutState( - ctx context.Context, request *model.PutStateRequest) (*model.PutStateResponse, error) { - r, err := f.getFunctionRuntime(ctx) - if err != nil { - return nil, err - } - if err := r.funcCtx.PutState(ctx, request.Key, request.Value); err != nil { - return nil, err - } - return &model.PutStateResponse{}, nil -} - -func (f *functionServerImpl) GetState( - ctx context.Context, request *model.GetStateRequest) (*model.GetStateResponse, error) { - r, err := f.getFunctionRuntime(ctx) - if err != nil { - return nil, err - } - value, err := r.funcCtx.GetState(ctx, request.Key) - if err != nil { - return nil, err - } - return &model.GetStateResponse{ - Value: value, - }, nil -} - -func (f *functionServerImpl) ListStates( - ctx context.Context, request *model.ListStatesRequest) (*model.ListStatesResponse, error) { - r, err := f.getFunctionRuntime(ctx) - if err != nil { - return nil, err - } - keys, err := r.funcCtx.ListStates(ctx, request.StartInclusive, request.EndExclusive) - if err != nil { - return nil, err - } - return &model.ListStatesResponse{ - Keys: keys, - }, nil -} - -func (f *functionServerImpl) DeleteState( - ctx context.Context, request *model.DeleteStateRequest) (*model.DeleteStateResponse, error) { - r, err := f.getFunctionRuntime(ctx) - if err != nil { - return nil, err - } - if err := r.funcCtx.DeleteState(ctx, request.Key); err != nil { - return nil, err - } - return &model.DeleteStateResponse{}, nil -} - -func (f *functionServerImpl) GetConfig( - ctx 
context.Context, _ *model.GetConfigRequest) (*model.GetConfigResponse, error) { - r, err := f.getFunctionRuntime(ctx) - if err != nil { - return nil, err - } - return &model.GetConfigResponse{ - Config: r.funcCtx.GetConfig(), - }, nil -} - -var _ model.FunctionServer = &functionServerImpl{} - -type Factory struct { - server *functionServerImpl - sync.Mutex - log *common.Logger -} - -func (f *Factory) NewFunctionRuntime(instance api.FunctionInstance, - _ *funcModel.RuntimeConfig) (api.FunctionRuntime, error) { - def := instance.Definition() - r := &runtime{ - inputCh: make(chan contube.Record), - funcCtx: instance.FunctionContext(), - log: instance.Logger(), - recordsMap: make(map[int64]contube.Record), - } - f.server.runtimeMaps.Store(common.GetNamespacedName(def.Namespace, def.Name).String(), r) - f.log.Info("Creating new function runtime", "function", common.GetNamespacedName(def.Namespace, def.Name)) - return r, nil -} - -func NewFactory(lis net.Listener) api.FunctionRuntimeFactory { - log := common.NewDefaultLogger().SubLogger("component", "external-runtime") - s := grpc.NewServer() - server := &functionServerImpl{} - model.RegisterFunctionServer(s, server) - // Register the health check service - healthServer := health.NewServer() - grpc_health_v1.RegisterHealthServer(s, healthServer) - healthServer.SetServingStatus("fs_external.Function", grpc_health_v1.HealthCheckResponse_SERVING) - - go func() { - log.Info("Starting external runtime server") - if err := s.Serve(lis); err != nil { - log.Error(err, "Failed to start external runtime server") - } - }() - return &Factory{ - server: server, - log: common.NewDefaultLogger().SubLogger("component", "external-runtime-factory"), - } -} - -const ( - DefaultSocketPath = "/tmp/fs.sock" -) - -func NewFactoryWithConfig(configMap config.ConfigMap) (api.FunctionRuntimeFactory, error) { - socketPath := "" - if v, ok := configMap["socket-path"].(string); ok { - socketPath = v - } - if socketPath == "" { - 
common.NewDefaultLogger().Info("socketPath is not set, use the default value: " + DefaultSocketPath) - socketPath = DefaultSocketPath - } - _ = os.Remove(socketPath) - lis, err := net.Listen("unix", socketPath) - if err != nil { - return nil, err - } - return NewFactory(lis), nil -} - -type runtime struct { - inputCh chan contube.Record - funcCtx api.FunctionContext - log *common.Logger - - recordsMapMu sync.Mutex - recordIndex int64 - recordsMap map[int64]contube.Record -} - -func (r *runtime) Call(e contube.Record) (contube.Record, error) { - r.inputCh <- e - return nil, nil -} - -func (r *runtime) Stop() { -} - -func (r *runtime) ReadRecord(ctx context.Context) (*model.Event, error) { - select { - case e := <-r.inputCh: - r.recordsMapMu.Lock() - defer r.recordsMapMu.Unlock() - eventId := r.recordIndex - r.recordIndex++ - r.recordsMap[eventId] = e - return &model.Event{ - Id: eventId, - Payload: e.GetPayload(), - }, nil - case <-ctx.Done(): - return nil, ctx.Err() - } -} - -// Ack acknowledges the processing of a record -// This is an idempotent operation -func (r *runtime) Ack(id int64) { - r.recordsMapMu.Lock() - defer r.recordsMapMu.Unlock() - if record, ok := r.recordsMap[id]; ok { - record.Commit() - delete(r.recordsMap, id) - } -} diff --git a/fs/runtime/external/runtime_test.go b/fs/runtime/external/runtime_test.go deleted file mode 100644 index 06e11c4c..00000000 --- a/fs/runtime/external/runtime_test.go +++ /dev/null @@ -1,572 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package external - -import ( - "context" - "encoding/json" - "fmt" - "net" - "os" - "testing" - "time" - - "github.com/functionstream/function-stream/fs/statestore" - - "github.com/functionstream/function-stream/clients/gofs" - "github.com/functionstream/function-stream/common" - "github.com/functionstream/function-stream/common/model" - "github.com/functionstream/function-stream/fs" - "github.com/functionstream/function-stream/fs/contube" - "github.com/stretchr/testify/assert" -) - -type Person struct { - Name string `json:"name"` - Money int `json:"money"` - Expected int `json:"expected"` -} - -type Counter struct { - Count int `json:"count"` -} - -type testRecord struct { - ID int `json:"id"` - Name string `json:"name"` -} - -var log = common.NewDefaultLogger() - -type TestFunction struct { -} - -func (f *TestFunction) Init(_ gofs.FunctionContext) error { - return nil -} - -func (f *TestFunction) Handle(_ gofs.FunctionContext, event gofs.Event[Person]) (gofs.Event[Person], error) { - p := event.Data() - p.Money += 1 - return gofs.NewEvent(p), nil -} - -type TestCounterFunction struct { -} - -func (f *TestCounterFunction) Init(ctx gofs.FunctionContext) error { - return nil -} - -func (f *TestCounterFunction) Handle(_ gofs.FunctionContext, event gofs.Event[Counter]) (gofs.Event[Counter], error) { - c := event.Data() - c.Count += 1 - return gofs.NewEvent(c), nil -} - -type TestSource struct { -} - -func (f *TestSource) Init(_ gofs.FunctionContext) error { - return nil -} - -func (f *TestSource) Handle(_ gofs.FunctionContext, emit func(context.Context, gofs.Event[testRecord]) error) error { - for i := 0; i < 10; i++ { - err := emit(context.Background(), gofs.NewEvent(&testRecord{ - ID: i, - Name: "test", - })) - if err != nil { - log.Error(err, "failed to emit record") - } - } - return nil -} - -type TestModules struct { - testFunction *TestFunction - 
testCounter *TestCounterFunction - testSource *TestSource - testSink *TestSink -} - -func NewTestModules() *TestModules { - return &TestModules{ - testFunction: &TestFunction{}, - testCounter: &TestCounterFunction{}, - testSource: &TestSource{}, - testSink: &TestSink{ - sinkCh: make(chan Counter), - }, - } -} - -func (t *TestModules) Run() { - err := gofs.NewFSClient(). - Register(gofs.DefaultModule, gofs.WithFunction(t.testFunction)). - Register("counter", gofs.WithFunction(t.testCounter)). - Register("test-source", gofs.WithSource(t.testSource)). - Register("test-sink", gofs.WithSink(t.testSink)). - Run() - if err != nil { - log.Error(err, "failed to run mock client") - } -} - -//nolint:goconst -func TestExternalRuntime(t *testing.T) { - testSocketPath := fmt.Sprintf("/tmp/%s.sock", t.Name()) - assert.NoError(t, os.RemoveAll(testSocketPath)) - assert.NoError(t, os.Setenv("FS_SOCKET_PATH", testSocketPath)) - assert.NoError(t, os.Setenv("FS_FUNCTION_NAME", "test")) - lis, err := net.Listen("unix", testSocketPath) - assert.NoError(t, err) - defer func(lis net.Listener) { - _ = lis.Close() - }(lis) - - fm, err := fs.NewFunctionManager( - fs.WithRuntimeFactory("external", NewFactory(lis)), - fs.WithTubeFactory("memory", contube.NewMemoryQueueFactory(context.Background())), - ) - if err != nil { - t.Fatal(err) - } - - go NewTestModules().Run() - - inputTopic := "input" - outputTopic := "output" - f := &model.Function{ - Name: "test", - Runtime: model.RuntimeConfig{ - Type: "external", - }, - Sources: []model.TubeConfig{ - { - Type: common.MemoryTubeType, - Config: (&contube.SourceQueueConfig{ - Topics: []string{inputTopic}, - SubName: "test", - }).ToConfigMap(), - }, - }, - Sink: model.TubeConfig{ - Type: common.MemoryTubeType, - Config: (&contube.SinkQueueConfig{ - Topic: outputTopic, - }).ToConfigMap(), - }, - Replicas: 1, - } - - err = fm.StartFunction(f) - assert.NoError(t, err) - - acked := make(chan struct{}) - - event, err := contube.NewStructRecord(&Person{ - 
Name: "test", - Money: 1, - }, func() { - acked <- struct{}{} - }) - assert.NoError(t, err) - err = fm.ProduceEvent(inputTopic, event) - assert.NoError(t, err) - output, err := fm.ConsumeEvent(outputTopic) - assert.NoError(t, err) - - p := &Person{} - err = json.Unmarshal(output.GetPayload(), &p) - assert.NoError(t, err) - assert.Equal(t, 2, p.Money) - - select { - case <-acked: - case <-time.After(5 * time.Second): - t.Fatal("failed to ack event") - } - - err = fm.DeleteFunction("", f.Name) - assert.NoError(t, err) -} - -func TestNonDefaultModule(t *testing.T) { - testSocketPath := fmt.Sprintf("/tmp/%s.sock", t.Name()) - assert.NoError(t, os.RemoveAll(testSocketPath)) - assert.NoError(t, os.Setenv("FS_SOCKET_PATH", testSocketPath)) - assert.NoError(t, os.Setenv("FS_FUNCTION_NAME", "test")) - assert.NoError(t, os.Setenv("FS_MODULE_NAME", "counter")) - lis, err := net.Listen("unix", testSocketPath) - assert.NoError(t, err) - defer func(lis net.Listener) { - _ = lis.Close() - }(lis) - - fm, err := fs.NewFunctionManager( - fs.WithRuntimeFactory("external", NewFactory(lis)), - fs.WithTubeFactory("memory", contube.NewMemoryQueueFactory(context.Background())), - ) - if err != nil { - t.Fatal(err) - } - - inputTopic := "input" - outputTopic := "output" - f := &model.Function{ - Name: "test", - Runtime: model.RuntimeConfig{ - Type: "external", - }, - Module: "counter", - Sources: []model.TubeConfig{ - { - Type: common.MemoryTubeType, - Config: (&contube.SourceQueueConfig{ - Topics: []string{inputTopic}, - SubName: "test", - }).ToConfigMap(), - }, - }, - Sink: model.TubeConfig{ - Type: common.MemoryTubeType, - Config: (&contube.SinkQueueConfig{ - Topic: outputTopic, - }).ToConfigMap(), - }, - Replicas: 1, - } - - err = fm.StartFunction(f) - assert.NoError(t, err) - - go NewTestModules().Run() - - event, err := contube.NewStructRecord(&Counter{ - Count: 1, - }, func() {}) - assert.NoError(t, err) - err = fm.ProduceEvent(inputTopic, event) - assert.NoError(t, err) - output, 
err := fm.ConsumeEvent(outputTopic) - assert.NoError(t, err) - - c := &Counter{} - err = json.Unmarshal(output.GetPayload(), &c) - assert.NoError(t, err) - assert.Equal(t, 2, c.Count) - - err = fm.DeleteFunction("", f.Name) - assert.NoError(t, err) -} - -func TestExternalSourceModule(t *testing.T) { - testSocketPath := fmt.Sprintf("/tmp/%s.sock", t.Name()) - assert.NoError(t, os.RemoveAll(testSocketPath)) - assert.NoError(t, os.Setenv("FS_SOCKET_PATH", testSocketPath)) - assert.NoError(t, os.Setenv("FS_FUNCTION_NAME", "test")) - assert.NoError(t, os.Setenv("FS_MODULE_NAME", "test-source")) - lis, err := net.Listen("unix", testSocketPath) - assert.NoError(t, err) - defer func(lis net.Listener) { - _ = lis.Close() - }(lis) - - fm, err := fs.NewFunctionManager( - fs.WithRuntimeFactory("external", NewFactory(lis)), - fs.WithTubeFactory("memory", contube.NewMemoryQueueFactory(context.Background())), - fs.WithTubeFactory("empty", contube.NewEmptyTubeFactory()), - ) - if err != nil { - t.Fatal(err) - } - - outputTopic := "output" - f := &model.Function{ - Name: "test", - Runtime: model.RuntimeConfig{ - Type: "external", - }, - Module: "test-source", - Sources: []model.TubeConfig{ - { - Type: common.EmptyTubeType, - }, - }, - Sink: model.TubeConfig{ - Type: common.MemoryTubeType, - Config: (&contube.SinkQueueConfig{ - Topic: outputTopic, - }).ToConfigMap(), - }, - Replicas: 1, - } - - err = fm.StartFunction(f) - assert.NoError(t, err) - - go NewTestModules().Run() - - for i := 0; i < 10; i++ { - output, err := fm.ConsumeEvent(outputTopic) - assert.NoError(t, err) - - r := &testRecord{} - err = json.Unmarshal(output.GetPayload(), &r) - assert.NoError(t, err) - assert.Equal(t, i, r.ID) - } - - err = fm.DeleteFunction("", f.Name) - assert.NoError(t, err) -} - -type TestSink struct { - sinkCh chan Counter -} - -func (f *TestSink) Init(_ gofs.FunctionContext) error { - return nil -} - -func (f *TestSink) Handle(ctx gofs.FunctionContext, event gofs.Event[Counter]) error { - 
f.sinkCh <- *event.Data() - return event.Ack(ctx) -} - -func TestExternalSinkModule(t *testing.T) { - testSocketPath := fmt.Sprintf("/tmp/%s.sock", t.Name()) - assert.NoError(t, os.RemoveAll(testSocketPath)) - assert.NoError(t, os.Setenv("FS_SOCKET_PATH", testSocketPath)) - assert.NoError(t, os.Setenv("FS_FUNCTION_NAME", "test")) - assert.NoError(t, os.Setenv("FS_MODULE_NAME", "test-sink")) - lis, err := net.Listen("unix", testSocketPath) - assert.NoError(t, err) - defer func(lis net.Listener) { - _ = lis.Close() - }(lis) - - fm, err := fs.NewFunctionManager( - fs.WithRuntimeFactory("external", NewFactory(lis)), - fs.WithTubeFactory("memory", contube.NewMemoryQueueFactory(context.Background())), - fs.WithTubeFactory("empty", contube.NewEmptyTubeFactory()), - ) - if err != nil { - t.Fatal(err) - } - - inputTopic := "input" - f := &model.Function{ - Name: "test", - Runtime: model.RuntimeConfig{ - Type: "external", - }, - Module: "test-sink", - Sources: []model.TubeConfig{ - { - Type: common.MemoryTubeType, - Config: (&contube.SourceQueueConfig{ - Topics: []string{inputTopic}, - SubName: "test", - }).ToConfigMap(), - }, - }, - Sink: model.TubeConfig{ - Type: common.EmptyTubeType, - }, - Replicas: 1, - } - - err = fm.StartFunction(f) - assert.NoError(t, err) - - testMods := NewTestModules() - sinkMod := testMods.testSink - - go testMods.Run() - - ackCh := make(chan struct{}, 100) - - event, err := contube.NewStructRecord(&Counter{ - Count: 1, - }, func() { - ackCh <- struct{}{} - }) - assert.NoError(t, err) - err = fm.ProduceEvent(inputTopic, event) - assert.NoError(t, err) - - r := <-sinkMod.sinkCh - assert.Equal(t, 1, r.Count) - - select { - case <-ackCh: - case <-time.After(5 * time.Second): - t.Fatal("failed to ack event") - } - - err = fm.DeleteFunction("", f.Name) - assert.NoError(t, err) -} - -func TestExternalStatefulModule(t *testing.T) { - testSocketPath := fmt.Sprintf("/tmp/%s.sock", t.Name()) - assert.NoError(t, os.RemoveAll(testSocketPath)) - 
assert.NoError(t, os.Setenv("FS_SOCKET_PATH", testSocketPath)) - assert.NoError(t, os.Setenv("FS_FUNCTION_NAME", "test")) - assert.NoError(t, os.Setenv("FS_MODULE_NAME", "test-stateful")) - lis, err := net.Listen("unix", testSocketPath) - assert.NoError(t, err) - defer func(lis net.Listener) { - _ = lis.Close() - }(lis) - - storeFactory, err := statestore.NewDefaultPebbleStateStoreFactory() - assert.NoError(t, err) - - fm, err := fs.NewFunctionManager( - fs.WithRuntimeFactory("external", NewFactory(lis)), - fs.WithTubeFactory("memory", contube.NewMemoryQueueFactory(context.Background())), - fs.WithTubeFactory("empty", contube.NewEmptyTubeFactory()), - fs.WithStateStoreFactory(storeFactory), - ) - assert.NoError(t, err) - - f := &model.Function{ - Name: "test", - Runtime: model.RuntimeConfig{ - Type: "external", - }, - Module: "test-stateful", - Sources: []model.TubeConfig{ - { - Type: common.EmptyTubeType, - }, - }, - Sink: model.TubeConfig{ - Type: common.EmptyTubeType, - }, - Replicas: 1, - } - - err = fm.StartFunction(f) - assert.NoError(t, err) - - readyCh := make(chan struct{}) - - go func() { - err := gofs.NewFSClient().Register("test-stateful", gofs.WithCustom(gofs.NewSimpleCustom( - func(ctx gofs.FunctionContext) error { - err = ctx.PutState(context.Background(), "test-key", []byte("test-value")) - if err != nil { - log.Error(err, "failed to put state") - } - close(readyCh) - return nil - }, - ))).Run() - if err != nil { - log.Error(err, "failed to run mock client") - } - }() - - <-readyCh - - store, err := storeFactory.NewStateStore(nil) - assert.NoError(t, err) - - value, err := store.GetState(context.Background(), "test-key") - assert.NoError(t, err) - assert.Equal(t, "test-value", string(value)) -} - -func TestFunctionConfig(t *testing.T) { - testSocketPath := fmt.Sprintf("/tmp/%s.sock", t.Name()) - assert.NoError(t, os.RemoveAll(testSocketPath)) - module := "test-function-config" - assert.NoError(t, os.Setenv("FS_SOCKET_PATH", testSocketPath)) - 
assert.NoError(t, os.Setenv("FS_FUNCTION_NAME", "test")) - assert.NoError(t, os.Setenv("FS_MODULE_NAME", module)) - lis, err := net.Listen("unix", testSocketPath) - assert.NoError(t, err) - defer func(lis net.Listener) { - _ = lis.Close() - }(lis) - - storeFactory, err := statestore.NewDefaultPebbleStateStoreFactory() - assert.NoError(t, err) - - fm, err := fs.NewFunctionManager( - fs.WithRuntimeFactory("external", NewFactory(lis)), - fs.WithTubeFactory("empty", contube.NewEmptyTubeFactory()), - fs.WithStateStoreFactory(storeFactory), - ) - assert.NoError(t, err) - - f := &model.Function{ - Name: "test", - Runtime: model.RuntimeConfig{ - Type: "external", - }, - Module: module, - Sources: []model.TubeConfig{ - { - Type: common.EmptyTubeType, - }, - }, - Sink: model.TubeConfig{ - Type: common.EmptyTubeType, - }, - Replicas: 1, - Config: map[string]string{ - "key1": "value1", - "key2": "value2", - }, - } - - err = fm.StartFunction(f) - assert.NoError(t, err) - - readyCh := make(chan struct{}) - - go func() { - err := gofs.NewFSClient().Register(module, gofs.WithCustom(gofs.NewSimpleCustom( - func(ctx gofs.FunctionContext) error { - err = ctx.PutState(context.Background(), "test-key", []byte("test-value")) - if err != nil { - log.Error(err, "failed to put state") - } - close(readyCh) - return nil - }, - ))).Run() - if err != nil { - log.Error(err, "failed to run mock client") - } - }() - - <-readyCh - - store, err := storeFactory.NewStateStore(nil) - assert.NoError(t, err) - - value, err := store.GetState(context.Background(), "test-key") - assert.NoError(t, err) - assert.Equal(t, "test-value", string(value)) -} diff --git a/fs/runtime/wazero/fs.go b/fs/runtime/wazero/fs.go deleted file mode 100644 index a9d4e296..00000000 --- a/fs/runtime/wazero/fs.go +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package wazero - -import ( - "io/fs" - - "github.com/functionstream/function-stream/common" - - . "github.com/tetratelabs/wazero/experimental/sys" - "github.com/tetratelabs/wazero/sys" -) - -type memoryFS struct { - FS - m map[string]File -} - -func (f *memoryFS) OpenFile(path string, _ Oflag, _ fs.FileMode) (File, Errno) { - if path == "." { - return &oneShotFile{isDir: true}, 0 - } - if file, ok := f.m[path]; ok { - return file, 0 - } - return nil, ENOENT -} - -func newMemoryFS(m map[string]File) FS { - return &memoryFS{ - m: m, - } -} - -type oneShotFile struct { - File - isDir bool - input []byte - output []byte -} - -func (f *oneShotFile) Read(p []byte) (n int, errno Errno) { - copy(p, f.input) - return len(p), 0 -} - -func (f *oneShotFile) Write(buf []byte) (n int, errno Errno) { - f.output = make([]byte, len(buf)) - copy(f.output, buf) - return len(buf), 0 -} - -func (f *oneShotFile) IsDir() (bool, Errno) { - return f.isDir, 0 -} - -func (f *oneShotFile) Close() Errno { - return 0 -} - -func (f *oneShotFile) Stat() (sys.Stat_t, Errno) { - return sys.Stat_t{ - Size: int64(len(f.input)), - }, 0 -} - -type logWriter struct { - log *common.Logger -} - -func (f *logWriter) Write(buf []byte) (n int, err error) { - f.log.Info(string(buf)) - return len(buf), nil -} diff --git a/fs/runtime/wazero/wazero_runtime.go b/fs/runtime/wazero/wazero_runtime.go deleted file mode 100644 index df0971b6..00000000 --- a/fs/runtime/wazero/wazero_runtime.go +++ /dev/null @@ -1,175 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package wazero - -import ( - "context" - "errors" - "fmt" - "os" - - "github.com/functionstream/function-stream/clients/gofs" - - "github.com/functionstream/function-stream/common/model" - - "github.com/functionstream/function-stream/common" - "github.com/functionstream/function-stream/fs/api" - "github.com/functionstream/function-stream/fs/contube" - "github.com/tetratelabs/wazero" - wazero_api "github.com/tetratelabs/wazero/api" - exp_sys "github.com/tetratelabs/wazero/experimental/sys" - "github.com/tetratelabs/wazero/experimental/sysfs" - "github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1" - "github.com/tetratelabs/wazero/sys" -) - -type WazeroFunctionRuntimeFactory struct { - opts *options -} - -type WASMFetcher interface { - Fetch(url string) ([]byte, error) -} - -type FileWASMFetcher struct { -} - -func (f *FileWASMFetcher) Fetch(url string) ([]byte, error) { - return os.ReadFile(url) -} - -func NewWazeroFunctionRuntimeFactory() api.FunctionRuntimeFactory { - return NewWazeroFunctionRuntimeFactoryWithOptions(WithWASMFetcher(&FileWASMFetcher{})) -} - -func NewWazeroFunctionRuntimeFactoryWithOptions(opts ...func(*options)) api.FunctionRuntimeFactory { - o := &options{} - for _, opt := range opts { - opt(o) - } - return &WazeroFunctionRuntimeFactory{ - opts: o, - } -} - -type options struct { - wasmFetcher WASMFetcher -} - -func WithWASMFetcher(fetcher WASMFetcher) func(*options) { - 
return func(o *options) { - o.wasmFetcher = fetcher - } -} - -func (f *WazeroFunctionRuntimeFactory) NewFunctionRuntime(instance api.FunctionInstance, - rc *model.RuntimeConfig) (api.FunctionRuntime, error) { - log := instance.Logger() - r := wazero.NewRuntime(instance.Context()) - _, err := r.NewHostModuleBuilder("env").NewFunctionBuilder().WithFunc(func(ctx context.Context, - m wazero_api.Module, a, b, c, d uint32) { - log.Error(fmt.Errorf("abort(%d, %d, %d, %d)", a, b, c, d), "the function is calling abort") - }).Export("abort").Instantiate(instance.Context()) - if err != nil { - return nil, fmt.Errorf("error instantiating env module: %w", err) - } - wasmLog := &logWriter{ - log: log, - } - - processFile := &oneShotFile{} - registerSchema := &oneShotFile{} - fileMap := map[string]exp_sys.File{ - "process": processFile, - "registerSchema": registerSchema, - } - fsConfig := wazero.NewFSConfig().(sysfs.FSConfig).WithSysFSMount(newMemoryFS(fileMap), "") - config := wazero.NewModuleConfig(). - WithEnv(gofs.FSFunctionName, common.GetNamespacedName(instance.Definition().Namespace, - instance.Definition().Name).String()). 
- WithStdout(wasmLog).WithStderr(wasmLog).WithFSConfig(fsConfig) - - wasi_snapshot_preview1.MustInstantiate(instance.Context(), r) - - if rc.Config == nil { - return nil, fmt.Errorf("no runtime config found") - } - path, exist := rc.Config["archive"] - if !exist { - return nil, fmt.Errorf("no wasm archive found") - } - pathStr := path.(string) - if pathStr == "" { - return nil, fmt.Errorf("empty wasm archive found") - } - wasmBytes, err := f.opts.wasmFetcher.Fetch(pathStr) - if err != nil { - return nil, fmt.Errorf("error reading wasm file: %w", err) - } - mod, err := r.InstantiateWithConfig(instance.Context(), wasmBytes, config) - if err != nil { - var exitErr *sys.ExitError - if errors.As(err, &exitErr) && exitErr.ExitCode() != 0 { - return nil, fmt.Errorf("failed to instantiate function, function exit with code %d", exitErr.ExitCode()) - } - } - if err != nil { - return nil, fmt.Errorf("error instantiating runtime: %w", err) - } - process := mod.ExportedFunction("process") - if process == nil { - return nil, fmt.Errorf("no process function found") - } - outputSchemaDef := registerSchema.output - var outputSchema string - if outputSchemaDef != nil { - outputSchema = string(outputSchemaDef) - log.Info("Register the output schema", "schema", outputSchema) - } - return &FunctionRuntime{ - callFunc: func(e contube.Record) (contube.Record, error) { - processFile.input = e.GetPayload() - _, err := process.Call(instance.Context()) - if err != nil { - return nil, err - } - return contube.NewSchemaRecordImpl(processFile.output, outputSchema, e.Commit), nil - }, - stopFunc: func() { - err := r.Close(instance.Context()) - if err != nil { - log.Error(err, "failed to close the runtime") - } - }, - log: log, - }, nil -} - -type FunctionRuntime struct { - api.FunctionRuntime - callFunc func(e contube.Record) (contube.Record, error) - stopFunc func() - log *common.Logger -} - -func (r *FunctionRuntime) Call(e contube.Record) (contube.Record, error) { - return r.callFunc(e) -} - 
-func (r *FunctionRuntime) Stop() { - r.stopFunc() -} diff --git a/fs/statestore/pebble.go b/fs/statestore/pebble.go deleted file mode 100644 index 974767ab..00000000 --- a/fs/statestore/pebble.go +++ /dev/null @@ -1,151 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package statestore - -import ( - "context" - "fmt" - "os" - - "github.com/functionstream/function-stream/common/config" - "github.com/functionstream/function-stream/common/model" - - "github.com/cockroachdb/pebble" - "github.com/functionstream/function-stream/fs/api" - "github.com/pkg/errors" -) - -type PebbleStateStoreFactory struct { - db *pebble.DB -} - -type PebbleStateStoreFactoryConfig struct { - DirName string `json:"dir_name" validate:"required"` -} - -type PebbleStateStoreConfig struct { - KeyPrefix string `json:"key_prefix,omitempty"` -} - -func NewPebbleStateStoreFactory(config config.ConfigMap) (api.StateStoreFactory, error) { - c := &PebbleStateStoreFactoryConfig{} - err := config.ToConfigStruct(c) - if err != nil { - return nil, fmt.Errorf("failed to parse config: %w", err) - } - db, err := pebble.Open(c.DirName, &pebble.Options{}) - if err != nil { - return nil, err - } - return &PebbleStateStoreFactory{db: db}, nil -} - -func NewDefaultPebbleStateStoreFactory() (api.StateStoreFactory, error) { - dir, err := os.MkdirTemp("", "") - if err != nil { - return nil, err - } - db, err := pebble.Open(dir, &pebble.Options{}) - 
if err != nil { - return nil, err - } - return &PebbleStateStoreFactory{db: db}, nil -} - -func (fact *PebbleStateStoreFactory) NewStateStore(f *model.Function) (api.StateStore, error) { - if f == nil { - return &PebbleStateStore{ - db: fact.db, - keyPrefix: "", - }, nil - } - c := &PebbleStateStoreConfig{} - err := f.State.ToConfigStruct(c) - if err != nil { - return nil, fmt.Errorf("failed to parse config: %w", err) - } - return &PebbleStateStore{ - db: fact.db, - keyPrefix: c.KeyPrefix, - }, nil -} - -func (fact *PebbleStateStoreFactory) Close() error { - return fact.db.Close() -} - -type PebbleStateStore struct { - db *pebble.DB - keyPrefix string -} - -func (s *PebbleStateStore) getKey(key string) string { - return s.keyPrefix + key -} - -func (s *PebbleStateStore) PutState(ctx context.Context, key string, value []byte) error { - if err := s.db.Set([]byte(s.getKey(key)), value, pebble.NoSync); err != nil { - return err - } - return nil -} - -func (s *PebbleStateStore) GetState(ctx context.Context, key string) ([]byte, error) { - value, closer, err := s.db.Get([]byte(s.getKey(key))) - if err != nil { - if errors.Is(err, pebble.ErrNotFound) { - return nil, api.ErrNotFound - } - return nil, err - } - result := make([]byte, len(value)) - copy(result, value) - if err := closer.Close(); err != nil { - return nil, err - } - return result, nil -} - -func (s *PebbleStateStore) ListStates( - ctx context.Context, startInclusive string, endExclusive string) ([]string, error) { - iter, err := s.db.NewIter(&pebble.IterOptions{ - LowerBound: []byte(s.getKey(startInclusive)), - UpperBound: []byte(s.getKey(endExclusive)), - }) - if err != nil { - return nil, err - } - defer func(iter *pebble.Iterator) { - _ = iter.Close() - }(iter) - var keys []string - for iter.First(); iter.Valid(); iter.Next() { - keys = append(keys, string(iter.Key())) - } - return keys, nil -} - -func (s *PebbleStateStore) DeleteState(ctx context.Context, key string) error { - if err := 
s.db.Delete([]byte(s.getKey(key)), pebble.NoSync); err != nil { - return err - } - return nil -} - -func (s *PebbleStateStore) Close() error { - return nil -} diff --git a/fs/statestore/pebble_test.go b/fs/statestore/pebble_test.go deleted file mode 100644 index 725f80f8..00000000 --- a/fs/statestore/pebble_test.go +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package statestore_test - -import ( - "context" - "testing" - - "github.com/functionstream/function-stream/fs/api" - "github.com/functionstream/function-stream/fs/statestore" - "github.com/stretchr/testify/assert" -) - -func TestPebbleStateStore(t *testing.T) { - ctx := context.Background() - storeFact, err := statestore.NewDefaultPebbleStateStoreFactory() - assert.Nil(t, err) - store, err := storeFact.NewStateStore(nil) - assert.Nil(t, err) - - _, err = store.GetState(ctx, "key") - assert.ErrorIs(t, err, api.ErrNotFound) - - err = store.PutState(ctx, "key", []byte("value")) - assert.Nil(t, err) - - value, err := store.GetState(ctx, "key") - assert.Nil(t, err) - assert.Equal(t, "value", string(value)) -} diff --git a/functions/example-functions.yaml b/functions/example-functions.yaml deleted file mode 100644 index 45767572..00000000 --- a/functions/example-functions.yaml +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright 2024 Function Stream Org. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -name: function-sample -namespace: function-stream -runtime: - type: "wasm" - config: - archive: "bin/example_basic.wasm" -sources: - - config: - inputs: - - "input" - subscription-name: "function-stream" - type: "memory" -sink: - config: - output: "output" - type: "memory" -replicas: 1 \ No newline at end of file diff --git a/go.mod b/go.mod deleted file mode 100644 index 8fe63292..00000000 --- a/go.mod +++ /dev/null @@ -1,119 +0,0 @@ -module github.com/functionstream/function-stream - -go 1.22 - -require ( - github.com/apache/pulsar-client-go v0.12.0 - github.com/bmizerany/perks v0.0.0-20230307044200-03f9df79da1e - github.com/cockroachdb/pebble v1.1.0 - github.com/emicklei/go-restful-openapi/v2 v2.9.2-0.20231020145053-a5b7d60bb267 - github.com/emicklei/go-restful/v3 v3.12.0 - github.com/go-logr/logr v1.4.1 - github.com/go-logr/zapr v1.3.0 - github.com/go-openapi/spec v0.21.0 - github.com/go-playground/validator/v10 v10.11.1 - github.com/nats-io/nats.go v1.37.0 - github.com/pkg/errors v0.9.1 - github.com/spf13/cobra v1.8.0 - github.com/spf13/viper v1.18.2 - github.com/stretchr/testify v1.9.0 - github.com/tetratelabs/wazero v1.6.0 - github.com/wirelessr/avroschema v0.0.0-20240111032105-ef4f4560e2a7 - go.uber.org/zap v1.26.0 - golang.org/x/net v0.26.0 - golang.org/x/time v0.5.0 - google.golang.org/grpc v1.64.1 - google.golang.org/protobuf v1.33.0 - gopkg.in/yaml.v3 v3.0.1 -) - -require ( - 
github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect - github.com/99designs/keyring v1.2.2 // indirect - github.com/AthenZ/athenz v1.11.50 // indirect - github.com/DataDog/zstd v1.5.5 // indirect - github.com/ardielle/ardielle-go v1.5.2 // indirect - github.com/beorn7/perks v1.0.1 // indirect - github.com/bits-and-blooms/bitset v1.13.0 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/cockroachdb/errors v1.11.1 // indirect - github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect - github.com/cockroachdb/redact v1.1.5 // indirect - github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect - github.com/danieljoos/wincred v1.2.1 // indirect - github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/dvsekhvalnov/jose2go v1.6.0 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/getsentry/sentry-go v0.27.0 // indirect - github.com/go-openapi/jsonpointer v0.21.0 // indirect - github.com/go-openapi/jsonreference v0.21.0 // indirect - github.com/go-openapi/swag v0.23.0 // indirect - github.com/go-playground/locales v0.14.0 // indirect - github.com/go-playground/universal-translator v0.18.0 // indirect - github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect - github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang-jwt/jwt v3.2.2+incompatible // indirect - github.com/golang/protobuf v1.5.4 // indirect - github.com/golang/snappy v0.0.4 // indirect - github.com/google/gofuzz v1.2.0 // indirect - github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect - github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect - github.com/hashicorp/errwrap v1.0.0 // indirect - github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/hashicorp/hcl v1.0.0 // indirect - github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/josharian/intern v1.0.0 // indirect - 
github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.7 // indirect - github.com/kr/pretty v0.3.1 // indirect - github.com/kr/text v0.2.0 // indirect - github.com/leodido/go-urn v1.2.1 // indirect - github.com/linkedin/goavro/v2 v2.12.0 // indirect - github.com/magiconair/properties v1.8.7 // indirect - github.com/mailru/easyjson v0.7.7 // indirect - github.com/mitchellh/mapstructure v1.5.0 // indirect - github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/mtibben/percent v0.2.1 // indirect - github.com/nats-io/nkeys v0.4.7 // indirect - github.com/nats-io/nuid v1.0.1 // indirect - github.com/pelletier/go-toml/v2 v2.1.0 // indirect - github.com/pierrec/lz4 v2.6.1+incompatible // indirect - github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/client_golang v1.19.0 // indirect - github.com/prometheus/client_model v0.6.0 // indirect - github.com/prometheus/common v0.48.0 // indirect - github.com/prometheus/procfs v0.12.0 // indirect - github.com/rogpeppe/go-internal v1.12.0 // indirect - github.com/sagikazarmark/locafero v0.4.0 // indirect - github.com/sagikazarmark/slog-shim v0.1.0 // indirect - github.com/sirupsen/logrus v1.9.3 // indirect - github.com/sourcegraph/conc v0.3.0 // indirect - github.com/spaolacci/murmur3 v1.1.0 // indirect - github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.6.0 // indirect - github.com/spf13/pflag v1.0.5 // indirect - github.com/subosito/gotenv v1.6.0 // indirect - go.uber.org/atomic v1.11.0 // indirect - go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.24.0 // indirect - golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 // indirect - golang.org/x/mod v0.17.0 // indirect - golang.org/x/oauth2 v0.18.0 // indirect - golang.org/x/sys v0.21.0 // indirect - golang.org/x/term v0.21.0 // indirect - golang.org/x/text v0.16.0 // indirect 
- google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect - gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/ini.v1 v1.67.0 // indirect - gopkg.in/square/go-jose.v2 v2.6.0 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect - k8s.io/apimachinery v0.29.1 // indirect - k8s.io/client-go v0.29.1 // indirect - k8s.io/klog/v2 v2.120.1 // indirect - k8s.io/utils v0.0.0-20240102154912-e7106e64919e // indirect - sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect - sigs.k8s.io/yaml v1.4.0 // indirect -) diff --git a/go.sum b/go.sum deleted file mode 100644 index b0601130..00000000 --- a/go.sum +++ /dev/null @@ -1,357 +0,0 @@ -github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMbk2FiG/kXiLl8BRyzTWDw7gX/Hz7Dd5eDMs= -github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4/go.mod h1:hN7oaIRCjzsZ2dE+yG5k+rsdt3qcwykqK6HVGcKwsw4= -github.com/99designs/keyring v1.2.2 h1:pZd3neh/EmUzWONb35LxQfvuY7kiSXAq3HQd97+XBn0= -github.com/99designs/keyring v1.2.2/go.mod h1:wes/FrByc8j7lFOAGLGSNEg8f/PaI3cgTBqhFkHUrPk= -github.com/AthenZ/athenz v1.11.50 h1:mCyQhI32GHPpPde9NVChI46hpRjw+vX1Z4RN8GCDILE= -github.com/AthenZ/athenz v1.11.50/go.mod h1:HfKWur/iDpTKNb2TVaKKy4mt+Qa0PnZpIOqcmR9/i+Q= -github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ= -github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= -github.com/apache/pulsar-client-go v0.12.0 h1:rrMlwpr6IgLRPXLRRh2vSlcw5tGV2PUSjZwmqgh2B2I= -github.com/apache/pulsar-client-go v0.12.0/go.mod h1:dkutuH4oS2pXiGm+Ti7fQZ4MRjrMPZ8IJeEGAWMeckk= -github.com/ardielle/ardielle-go v1.5.2 h1:TilHTpHIQJ27R1Tl/iITBzMwiUGSlVfiVhwDNGM3Zj4= -github.com/ardielle/ardielle-go v1.5.2/go.mod h1:I4hy1n795cUhaVt/ojz83SNVCYIGsAFAONtv2Dr7HUI= -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= 
-github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE= -github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= -github.com/bmizerany/perks v0.0.0-20230307044200-03f9df79da1e h1:mWOqoK5jV13ChKf/aF3plwQ96laasTJgZi4f1aSOu+M= -github.com/bmizerany/perks v0.0.0-20230307044200-03f9df79da1e/go.mod h1:ac9efd0D1fsDb3EJvhqgXRbFx7bs2wqZ10HQPeU8U/Q= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= -github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= -github.com/cockroachdb/errors v1.11.1 h1:xSEW75zKaKCWzR3OfxXUxgrk/NtT4G1MiOv5lWZazG8= -github.com/cockroachdb/errors v1.11.1/go.mod h1:8MUxA3Gi6b25tYlFEBGLf+D8aISL+M4MIpiWMSNRfxw= -github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= -github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= -github.com/cockroachdb/pebble v1.1.0 h1:pcFh8CdCIt2kmEpK0OIatq67Ln9uGDYY3d5XnE0LJG4= -github.com/cockroachdb/pebble v1.1.0/go.mod h1:sEHm5NOXxyiAoKWhoFxT8xMgd/f3RA6qUqQ1BXKrh2E= -github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= -github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= -github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= -github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= -github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod 
h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/danieljoos/wincred v1.2.1 h1:dl9cBrupW8+r5250DYkYxocLeZ1Y4vB1kxgtjxw8GQs= -github.com/danieljoos/wincred v1.2.1/go.mod h1:uGaFL9fDn3OLTvzCGulzE+SzjEe5NGlh5FdCcyfPwps= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dimfeld/httptreemux v5.0.1+incompatible h1:Qj3gVcDNoOthBAqftuD596rm4wg/adLLz5xh5CmpiCA= -github.com/dimfeld/httptreemux v5.0.1+incompatible/go.mod h1:rbUlSV+CCpv/SuqUTP/8Bk2O3LyUV436/yaRGkhP6Z0= -github.com/dvsekhvalnov/jose2go v1.6.0 h1:Y9gnSnP4qEI0+/uQkHvFXeD2PLPJeXEL+ySMEA2EjTY= -github.com/dvsekhvalnov/jose2go v1.6.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= -github.com/emicklei/go-restful-openapi/v2 v2.9.2-0.20231020145053-a5b7d60bb267 h1:9hKp1vLTq4I9hA/hhZHOUTNX8DGFdLsLMl9pHl9VJAA= -github.com/emicklei/go-restful-openapi/v2 v2.9.2-0.20231020145053-a5b7d60bb267/go.mod h1:4CTuOXHFg3jkvCpnXN+Wkw5prVUnP8hIACssJTYorWo= -github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/emicklei/go-restful/v3 v3.12.0 h1:y2DdzBAURM29NFF94q6RaY4vjIH1rtwDapwQtU84iWk= -github.com/emicklei/go-restful/v3 v3.12.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= -github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod 
h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= -github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= -github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= -github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= -github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= -github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= -github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= -github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= -github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= -github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= -github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY= -github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= -github.com/go-openapi/swag v0.23.0/go.mod 
h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= -github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A= -github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= -github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU= -github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs= -github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/jYrnRPArHwAcmLoJZxyho= -github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA= -github.com/go-playground/validator/v10 v10.11.1 h1:prmOlTVv+YjZjmRmNSF3VmspqJIxJWXmqUsHwfTRRkQ= -github.com/go-playground/validator/v10 v10.11.1/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU= -github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0= -github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY= -github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= -github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= -github.com/golang/snappy v0.0.4/go.mod 
h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= -github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= -github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= -github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= -github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= -github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= -github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= -github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= -github.com/inconshreveable/mousetrap v1.1.0/go.mod 
h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= -github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg= -github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= -github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w= -github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= -github.com/linkedin/goavro/v2 v2.12.0 h1:rIQQSj8jdAUlKQh6DttK8wCRv4t4QO09g1C4aBWXslg= -github.com/linkedin/goavro/v2 v2.12.0/go.mod h1:KXx+erlq+RPlGSPmLF7xGo6SAbh8sCQ53x064+ioxhk= -github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= -github.com/magiconair/properties v1.8.7/go.mod 
h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= -github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= -github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/mtibben/percent v0.2.1 h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs= -github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns= -github.com/nats-io/nats.go v1.37.0 h1:07rauXbVnnJvv1gfIyghFEo6lUcYRY0WXc3x7x0vUxE= -github.com/nats-io/nats.go v1.37.0/go.mod h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8= -github.com/nats-io/nkeys v0.4.7 h1:RwNJbbIdYCoClSDNY7QVKZlyb/wfT6ugvFCiKy6vDvI= -github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDmGD0nc= -github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= -github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= -github.com/niemeyer/pretty 
v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= -github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= -github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= -github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= -github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM= -github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= -github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= -github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= -github.com/prometheus/client_model v0.6.0 
h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos= -github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8= -github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE= -github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= -github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= -github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= -github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= -github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= -github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= -github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= -github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= -github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= -github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= -github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= -github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= -github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero 
v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= -github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= -github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= -github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= -github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= -github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= -github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod 
h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= -github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= -github.com/tetratelabs/wazero v1.6.0 h1:z0H1iikCdP8t+q341xqepY4EWvHEw8Es7tlqiVzlP3g= -github.com/tetratelabs/wazero v1.6.0/go.mod h1:0U0G41+ochRKoPKCJlh0jMg1CHkyfK8kDqiirMmKY8A= -github.com/wirelessr/avroschema v0.0.0-20240111032105-ef4f4560e2a7 h1:8W0F/PiIV6W/yl2JNfPIVey3mYrdAz/h77RUMIcBIUs= -github.com/wirelessr/avroschema v0.0.0-20240111032105-ef4f4560e2a7/go.mod h1:ivMyAKRe5TqRXC665a1Lv9cWLbkua2K1dnJYslCYi00= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= -go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= -go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= -go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= -go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= -go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= 
-golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= -golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= -golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 h1:LfspQV/FYTatPTr/3HzIcmiUFH7PGP+OQ6mgDYo3yuQ= -golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= -golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= -golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= -golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= 
-golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= -golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod 
h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= -golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= 
-google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= -google.golang.org/grpc v1.64.1 h1:LKtvyfbX3UGVPFcGqJ9ItpVWW6oN/2XqTxfAnwRRXiA= -google.golang.org/grpc v1.64.1/go.mod h1:hiQF4LFZelK2WKaP6W0L92zGHtiQdZxk8CrSdvyjeP0= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= -gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= -gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI= 
-gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/apimachinery v0.29.1 h1:KY4/E6km/wLBguvCZv8cKTeOwwOBqFNjwJIdMkMbbRc= -k8s.io/apimachinery v0.29.1/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= -k8s.io/client-go v0.29.1 h1:19B/+2NGEwnFLzt0uB5kNJnfTsbV8w6TgQRz9l7ti7A= -k8s.io/client-go v0.29.1/go.mod h1:TDG/psL9hdet0TI9mGyHJSgRkW3H9JZk2dNEUS7bRks= -k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= -k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/utils v0.0.0-20240102154912-e7106e64919e h1:eQ/4ljkx21sObifjzXwlPKpdGLrCfRziVtos3ofG/sQ= -k8s.io/utils v0.0.0-20240102154912-e7106e64919e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= 
-sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= -sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= -sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= -sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/license-checker/license-checker.sh b/license-checker/license-checker.sh deleted file mode 100755 index 13777338..00000000 --- a/license-checker/license-checker.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/bash -# Copyright 2024 Function Stream Org. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -xe - -cd "$(git rev-parse --show-toplevel)" - -LICENSE_CHECKER="bin/license-header-checker" - -if [ ! -f "$LICENSE_CHECKER" ]; then - echo "license-checker not found, building it..." - export BINDIR=bin && curl -s https://raw.githubusercontent.com/lluissm/license-header-checker/master/install.sh | bash -fi - -$LICENSE_CHECKER -a -r -i bin,admin/client,common/run.go,common/signal.go,fs/runtime/external/model,clients,operator ./license-checker/license-header.txt . go -$LICENSE_CHECKER -a -r ./license-checker/license-header.txt . proto -$LICENSE_CHECKER -a -r -i bin,admin/client,.chglog,operator ./license-checker/license-header-sh.txt . sh yaml yml -$LICENSE_CHECKER -a -r -i bin,admin/client,.chglog,CHANGELOG.md,operator ./license-checker/license-header-md.txt . 
md - -if [[ -z $(git status -s) ]]; then - echo "No license header issues found" -else - echo "$(git status)" - echo "License header issues found" - exit 1 -fi diff --git a/license-checker/license-header-md.txt b/license-checker/license-header-md.txt deleted file mode 100644 index b40ad9ca..00000000 --- a/license-checker/license-header-md.txt +++ /dev/null @@ -1,15 +0,0 @@ - \ No newline at end of file diff --git a/license-checker/license-header-sh.txt b/license-checker/license-header-sh.txt deleted file mode 100644 index dc0992bf..00000000 --- a/license-checker/license-header-sh.txt +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2024 Function Stream Org. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. \ No newline at end of file diff --git a/license-checker/license-header.txt b/license-checker/license-header.txt deleted file mode 100644 index ac4d365d..00000000 --- a/license-checker/license-header.txt +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ \ No newline at end of file diff --git a/operator/.devcontainer/devcontainer.json b/operator/.devcontainer/devcontainer.json deleted file mode 100644 index 0e0eed21..00000000 --- a/operator/.devcontainer/devcontainer.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "name": "Kubebuilder DevContainer", - "image": "docker.io/golang:1.23", - "features": { - "ghcr.io/devcontainers/features/docker-in-docker:2": {}, - "ghcr.io/devcontainers/features/git:1": {} - }, - - "runArgs": ["--network=host"], - - "customizations": { - "vscode": { - "settings": { - "terminal.integrated.shell.linux": "/bin/bash" - }, - "extensions": [ - "ms-kubernetes-tools.vscode-kubernetes-tools", - "ms-azuretools.vscode-docker" - ] - } - }, - - "onCreateCommand": "bash .devcontainer/post-install.sh" -} - diff --git a/operator/.devcontainer/post-install.sh b/operator/.devcontainer/post-install.sh deleted file mode 100644 index 265c43ee..00000000 --- a/operator/.devcontainer/post-install.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash -set -x - -curl -Lo ./kind https://kind.sigs.k8s.io/dl/latest/kind-linux-amd64 -chmod +x ./kind -mv ./kind /usr/local/bin/kind - -curl -L -o kubebuilder https://go.kubebuilder.io/dl/latest/linux/amd64 -chmod +x kubebuilder -mv kubebuilder /usr/local/bin/ - -KUBECTL_VERSION=$(curl -L -s https://dl.k8s.io/release/stable.txt) -curl -LO "https://dl.k8s.io/release/$KUBECTL_VERSION/bin/linux/amd64/kubectl" -chmod +x kubectl -mv kubectl /usr/local/bin/kubectl - -docker network create -d=bridge --subnet=172.19.0.0/24 kind - -kind version -kubebuilder version -docker --version -go version -kubectl version --client diff --git a/operator/.dockerignore b/operator/.dockerignore deleted file mode 100644 index a3aab7af..00000000 --- a/operator/.dockerignore +++ /dev/null @@ -1,3 +0,0 @@ -# More info: https://docs.docker.com/engine/reference/builder/#dockerignore-file -# Ignore build 
and test binaries. -bin/ diff --git a/operator/.github/workflows/lint.yml b/operator/.github/workflows/lint.yml deleted file mode 100644 index 4951e331..00000000 --- a/operator/.github/workflows/lint.yml +++ /dev/null @@ -1,23 +0,0 @@ -name: Lint - -on: - push: - pull_request: - -jobs: - lint: - name: Run on Ubuntu - runs-on: ubuntu-latest - steps: - - name: Clone the code - uses: actions/checkout@v4 - - - name: Setup Go - uses: actions/setup-go@v5 - with: - go-version-file: go.mod - - - name: Run linter - uses: golangci/golangci-lint-action@v6 - with: - version: v1.63.4 diff --git a/operator/.github/workflows/test-e2e.yml b/operator/.github/workflows/test-e2e.yml deleted file mode 100644 index b2eda8c3..00000000 --- a/operator/.github/workflows/test-e2e.yml +++ /dev/null @@ -1,35 +0,0 @@ -name: E2E Tests - -on: - push: - pull_request: - -jobs: - test-e2e: - name: Run on Ubuntu - runs-on: ubuntu-latest - steps: - - name: Clone the code - uses: actions/checkout@v4 - - - name: Setup Go - uses: actions/setup-go@v5 - with: - go-version-file: go.mod - - - name: Install the latest version of kind - run: | - curl -Lo ./kind https://kind.sigs.k8s.io/dl/latest/kind-linux-amd64 - chmod +x ./kind - sudo mv ./kind /usr/local/bin/kind - - - name: Verify kind installation - run: kind version - - - name: Create kind cluster - run: kind create cluster - - - name: Running Test e2e - run: | - go mod tidy - make test-e2e diff --git a/operator/.github/workflows/test.yml b/operator/.github/workflows/test.yml deleted file mode 100644 index fc2e80d3..00000000 --- a/operator/.github/workflows/test.yml +++ /dev/null @@ -1,23 +0,0 @@ -name: Tests - -on: - push: - pull_request: - -jobs: - test: - name: Run on Ubuntu - runs-on: ubuntu-latest - steps: - - name: Clone the code - uses: actions/checkout@v4 - - - name: Setup Go - uses: actions/setup-go@v5 - with: - go-version-file: go.mod - - - name: Running Tests - run: | - go mod tidy - make test diff --git a/operator/.gitignore 
b/operator/.gitignore deleted file mode 100644 index d97ffc51..00000000 --- a/operator/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ - -# Binaries for programs and plugins -*.exe -*.exe~ -*.dll -*.so -*.dylib -bin - -# Test binary, build with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - -# Kubernetes Generated files - skip generated files, except for vendored files - -!vendor/**/zz_generated.* - -# editor and IDE paraphernalia -.idea -*.swp -*.swo -*~ diff --git a/operator/.golangci.yml b/operator/.golangci.yml deleted file mode 100644 index 6b297462..00000000 --- a/operator/.golangci.yml +++ /dev/null @@ -1,47 +0,0 @@ -run: - timeout: 5m - allow-parallel-runners: true - -issues: - # don't skip warning about doc comments - # don't exclude the default set of lint - exclude-use-default: false - # restore some of the defaults - # (fill in the rest as needed) - exclude-rules: - - path: "api/*" - linters: - - lll - - path: "internal/*" - linters: - - dupl - - lll -linters: - disable-all: true - enable: - - dupl - - errcheck - - copyloopvar - - ginkgolinter - - goconst - - gocyclo - - gofmt - - goimports - - gosimple - - govet - - ineffassign - - lll - - misspell - - nakedret - - prealloc - - revive - - staticcheck - - typecheck - - unconvert - - unparam - - unused - -linters-settings: - revive: - rules: - - name: comment-spacings diff --git a/operator/DEVELOPER.md b/operator/DEVELOPER.md deleted file mode 100644 index 06149249..00000000 --- a/operator/DEVELOPER.md +++ /dev/null @@ -1,125 +0,0 @@ -## Getting Started - -### Prerequisites - -- go version v1.23.0+ -- docker version 17.03+. -- kubectl version v1.11.3+. -- Access to a Kubernetes v1.11.3+ cluster. - -### To Deploy on the cluster - -**Build and push your image to the location specified by `IMG`:** - -```sh -make docker-build docker-push IMG=/operator:tag -``` - -**NOTE:** This image ought to be published in the personal registry you specified. 
-And it is required to have access to pull the image from the working environment. -Make sure you have the proper permission to the registry if the above commands don't work. - -**Install the CRDs into the cluster:** - -```sh -make install -``` - -**Deploy the Manager to the cluster with the image specified by `IMG`:** - -```sh -make deploy IMG=/operator:tag -``` - -> **NOTE**: If you encounter RBAC errors, you may need to grant yourself cluster-admin -> privileges or be logged in as admin. - -**Create instances of your solution** -You can apply the samples (examples) from the config/sample: - -```sh -kubectl apply -k config/samples/ -``` - -> **NOTE**: Ensure that the samples has default values to test it out. - -### To Uninstall - -**Delete the instances (CRs) from the cluster:** - -```sh -kubectl delete -k config/samples/ -``` - -**Delete the APIs(CRDs) from the cluster:** - -```sh -make uninstall -``` - -**UnDeploy the controller from the cluster:** - -```sh -make undeploy -``` - -## Project Distribution - -Following the options to release and provide this solution to the users. - -### By providing a bundle with all YAML files - -1. Build the installer for the image built and published in the registry: - - ```sh - make build-installer IMG=/operator:tag - ``` - - **NOTE:** The makefile target mentioned above generates an 'install.yaml' - file in the dist directory. This file contains all the resources built - with Kustomize, which are necessary to install this project without its - dependencies. - -2. Using the installer - - Users can just run 'kubectl apply -f ' to install - the project, i.e.: - - ```sh - kubectl apply -f https://raw.githubusercontent.com//operator//dist/install.yaml - ``` - -### By providing a Helm Chart - -1. Build the chart using the optional helm plugin - - ```sh - kubebuilder edit --plugins=helm/v1-alpha - ``` - -2. See that a chart was generated under 'dist/chart', and users - can obtain this solution from there. 
- -**NOTE:** If you change the project, you need to update the Helm Chart -using the same command above to sync the latest changes. Furthermore, -if you create webhooks, you need to use the above command with -the '--force' flag and manually ensure that any custom configuration -previously added to 'dist/chart/values.yaml' or 'dist/chart/manager/manager.yaml' -is manually re-applied afterwards. - -**NOTE:** Run `make help` for more information on all potential `make` targets - -### CRD Updates - -When CRD definitions are updated in `operator/config/crd/bases/`, you need to ensure the Helm chart CRD templates are also updated to match. The Helm chart CRD templates are located in `operator/deploy/chart/templates/crd/`. - -To update the Helm chart CRD templates: - -1. Update the CRD definitions in `operator/config/crd/bases/` -2. Manually update the corresponding files in `operator/deploy/chart/templates/crd/` to match the base definitions -3. Ensure any Helm-specific templating (like `{{- if .Values.crd.enable }}`) is preserved -4. Test the changes to ensure the CRDs work correctly - -**Important:** The Helm chart CRD templates should always reflect the latest changes from the base CRD definitions to ensure consistency between direct CRD installation and Helm chart installation. 
- -More information can be found via the [Kubebuilder Documentation](https://book.kubebuilder.io/introduction.html) \ No newline at end of file diff --git a/operator/Dockerfile b/operator/Dockerfile deleted file mode 100644 index a407f656..00000000 --- a/operator/Dockerfile +++ /dev/null @@ -1,34 +0,0 @@ -# Build the manager binary -FROM docker.io/golang:1.23 AS builder -ARG TARGETOS -ARG TARGETARCH - -WORKDIR /workspace -# Copy the Go Modules manifests -COPY go.mod go.mod -COPY go.sum go.sum -# cache deps before building and copying source so that we don't need to re-download as much -# and so that source changes don't invalidate our downloaded layer -RUN go mod download - -# Copy the go source -COPY cmd/main.go cmd/main.go -COPY api/ api/ -COPY internal/ internal/ -COPY utils/ utils/ - -# Build -# the GOARCH has not a default value to allow the binary be built according to the host where the command -# was called. For example, if we call make docker-build in a local env which has the Apple Silicon M1 SO -# the docker BUILDPLATFORM arg will be linux/arm64 when for Apple x86 it will be linux/amd64. Therefore, -# by leaving it empty we can ensure that the container and binary shipped on it will have the same platform. -RUN CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -a -o manager cmd/main.go - -# Use distroless as minimal base image to package the manager binary -# Refer to https://github.com/GoogleContainerTools/distroless for more details -FROM gcr.io/distroless/static:nonroot -WORKDIR / -COPY --from=builder /workspace/manager . 
-USER 65532:65532 - -ENTRYPOINT ["/manager"] diff --git a/operator/Makefile b/operator/Makefile deleted file mode 100644 index 3c557d76..00000000 --- a/operator/Makefile +++ /dev/null @@ -1,264 +0,0 @@ -# Image URL to use all building/pushing image targets -IMG ?= functionstream/operator:latest - -# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) -ifeq (,$(shell go env GOBIN)) -GOBIN=$(shell go env GOPATH)/bin -else -GOBIN=$(shell go env GOBIN) -endif - -# CONTAINER_TOOL defines the container tool to be used for building images. -# Be aware that the target commands are only tested with Docker which is -# scaffolded by default. However, you might want to replace it to use other -# tools. (i.e. podman) -CONTAINER_TOOL ?= docker - -# Setting SHELL to bash allows bash commands to be executed by recipes. -# Options are set to exit when a recipe line exits non-zero or a piped command fails. -SHELL = /usr/bin/env bash -o pipefail -.SHELLFLAGS = -ec - -.PHONY: all -all: build - -##@ General - -# The help target prints out all targets with their descriptions organized -# beneath their categories. The categories are represented by '##@' and the -# target descriptions by '##'. The awk command is responsible for reading the -# entire set of makefiles included in this invocation, looking for lines of the -# file as xyz: ## something, and then pretty-format the target and help. Then, -# if there's a line with ##@ something, that gets pretty-printed as a category. -# More info on the usage of ANSI control characters for terminal formatting: -# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters -# More info on the awk command: -# http://linuxcommand.org/lc3_adv_awk.php - -.PHONY: help -help: ## Display this help. 
- @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) - -##@ Development - -.PHONY: manifests -manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects. - $(CONTROLLER_GEN) rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases - @mkdir -p deploy/crds - @for crd in config/crd/bases/*.yaml; do \ - fname=$$(basename $$crd); \ - echo '# This file is auto-copied from config/crd/bases/$$fname' > deploy/crds/$$fname; \ - echo '# Do not edit manually.' >> deploy/crds/$$fname; \ - cat $$crd >> deploy/crds/$$fname; \ - done - -.PHONY: generate -generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations. - $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..." - -.PHONY: fmt -fmt: ## Run go fmt against code. - go fmt ./... - -.PHONY: vet -vet: ## Run go vet against code. - go vet ./... - -.PHONY: test -test: manifests generate fmt vet setup-envtest ## Run tests. - KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test $$(go list ./... | grep -v /e2e) -coverprofile cover.out - -# TODO(user): To use a different vendor for e2e tests, modify the setup under 'tests/e2e'. -# The default setup assumes Kind is pre-installed and builds/loads the Manager Docker image locally. -# CertManager is installed by default; skip with: -# - CERT_MANAGER_INSTALL_SKIP=true -.PHONY: test-e2e -test-e2e: manifests generate fmt vet ## Run the e2e tests. Expected an isolated environment using Kind. - @command -v $(KIND) >/dev/null 2>&1 || { \ - echo "Kind is not installed. Please install Kind manually."; \ - exit 1; \ - } - @$(KIND) get clusters | grep -q 'kind' || { \ - echo "No Kind cluster is running. 
Please start a Kind cluster before running the e2e tests."; \ - exit 1; \ - } - go test ./test/e2e/ -v -ginkgo.v - -.PHONY: lint -lint: golangci-lint ## Run golangci-lint linter - $(GOLANGCI_LINT) run - -.PHONY: lint-fix -lint-fix: golangci-lint ## Run golangci-lint linter and perform fixes - $(GOLANGCI_LINT) run --fix - -.PHONY: lint-config -lint-config: golangci-lint ## Verify golangci-lint linter configuration - $(GOLANGCI_LINT) config verify - -##@ Build - -.PHONY: build -build: manifests generate fmt vet ## Build manager binary. - go build -o bin/manager cmd/main.go - -.PHONY: run -run: manifests generate fmt vet ## Run a controller from your host. - go run ./cmd/main.go - -# If you wish to build the manager image targeting other platforms you can use the --platform flag. -# (i.e. docker build --platform linux/arm64). However, you must enable docker buildKit for it. -# More info: https://docs.docker.com/develop/develop-images/build_enhancements/ -.PHONY: docker-build -docker-build: ## Build docker image with the manager. - $(CONTAINER_TOOL) build -t ${IMG} . - -.PHONY: docker-push -docker-push: ## Push docker image with the manager. - $(CONTAINER_TOOL) push ${IMG} - -# PLATFORMS defines the target platforms for the manager image be built to provide support to multiple -# architectures. (i.e. make docker-buildx IMG=myregistry/mypoperator:0.0.1). To use this option you need to: -# - be able to use docker buildx. More info: https://docs.docker.com/build/buildx/ -# - have enabled BuildKit. More info: https://docs.docker.com/develop/develop-images/build_enhancements/ -# - be able to push the image to your registry (i.e. if you do not set a valid value via IMG=> then the export will fail) -# To adequately provide solutions that are compatible with multiple platforms, you should consider using this option. 
-#PLATFORMS ?= linux/arm64,linux/amd64,linux/s390x,linux/ppc64le -PLATFORMS ?= linux/arm64,linux/amd64 -.PHONY: docker-buildx -docker-buildx: ## Build and push docker image for the manager for cross-platform support - # copy existing Dockerfile and insert --platform=${BUILDPLATFORM} into Dockerfile.cross, and preserve the original Dockerfile - sed -e '1 s/\(^FROM\)/FROM --platform=\$$\{BUILDPLATFORM\}/; t' -e ' 1,// s//FROM --platform=\$$\{BUILDPLATFORM\}/' Dockerfile > Dockerfile.cross - - $(CONTAINER_TOOL) buildx create --name operator-builder - $(CONTAINER_TOOL) buildx use operator-builder - - $(CONTAINER_TOOL) buildx build --push --platform=$(PLATFORMS) --tag ${IMG} -f Dockerfile.cross . - - $(CONTAINER_TOOL) buildx rm operator-builder - rm Dockerfile.cross - -.PHONY: build-installer -build-installer: manifests generate kustomize ## Generate a consolidated YAML with CRDs and deployment. - mkdir -p dist - cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} - $(KUSTOMIZE) build config/default > dist/install.yaml - -##@ Deployment - -ifndef ignore-not-found - ignore-not-found = false -endif - -.PHONY: install -install: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config. - $(KUSTOMIZE) build config/crd | $(KUBECTL) apply -f - - -.PHONY: uninstall -uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion. - $(KUSTOMIZE) build config/crd | $(KUBECTL) delete --ignore-not-found=$(ignore-not-found) -f - - -.PHONY: deploy -deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config. - cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} - $(KUSTOMIZE) build config/default | $(KUBECTL) apply -f - - -.PHONY: undeploy -undeploy: kustomize ## Undeploy controller from the K8s cluster specified in ~/.kube/config. 
Call with ignore-not-found=true to ignore resource not found errors during deletion. - $(KUSTOMIZE) build config/default | $(KUBECTL) delete --ignore-not-found=$(ignore-not-found) -f - - -##@ Dependencies - -## Location to install dependencies to -LOCALBIN ?= $(shell pwd)/bin -$(LOCALBIN): - mkdir -p $(LOCALBIN) - -## Tool Binaries -KUBECTL ?= kubectl -KIND ?= kind -KUSTOMIZE ?= $(LOCALBIN)/kustomize -CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen -ENVTEST ?= $(LOCALBIN)/setup-envtest -GOLANGCI_LINT = $(LOCALBIN)/golangci-lint -CODEGEN ?= $(LOCALBIN)/code-generator - -## Tool Versions -KUSTOMIZE_VERSION ?= v5.6.0 -CONTROLLER_TOOLS_VERSION ?= v0.17.2 -CODEGEN_VERSION ?= v0.32.1 -#ENVTEST_VERSION is the version of controller-runtime release branch to fetch the envtest setup script (i.e. release-0.20) -ENVTEST_VERSION ?= $(shell go list -m -f "{{ .Version }}" sigs.k8s.io/controller-runtime | awk -F'[v.]' '{printf "release-%d.%d", $$2, $$3}') -#ENVTEST_K8S_VERSION is the version of Kubernetes to use for setting up ENVTEST binaries (i.e. 1.31) -ENVTEST_K8S_VERSION ?= $(shell go list -m -f "{{ .Version }}" k8s.io/api | awk -F'[v.]' '{printf "1.%d", $$3}') -GOLANGCI_LINT_VERSION ?= v1.63.4 - -.PHONY: kustomize -kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary. -$(KUSTOMIZE): $(LOCALBIN) - $(call go-install-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v5,$(KUSTOMIZE_VERSION)) - -.PHONY: controller-gen -controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary. -$(CONTROLLER_GEN): $(LOCALBIN) - $(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen,$(CONTROLLER_TOOLS_VERSION)) - -.PHONY: setup-envtest -setup-envtest: envtest ## Download the binaries required for ENVTEST in the local bin directory. - @echo "Setting up envtest binaries for Kubernetes version $(ENVTEST_K8S_VERSION)..." 
- @$(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path || { \ - echo "Error: Failed to set up envtest binaries for version $(ENVTEST_K8S_VERSION)."; \ - exit 1; \ - } - -.PHONY: envtest -envtest: $(ENVTEST) ## Download setup-envtest locally if necessary. -$(ENVTEST): $(LOCALBIN) - $(call go-install-tool,$(ENVTEST),sigs.k8s.io/controller-runtime/tools/setup-envtest,$(ENVTEST_VERSION)) - -.PHONY: golangci-lint -golangci-lint: $(GOLANGCI_LINT) ## Download golangci-lint locally if necessary. -$(GOLANGCI_LINT): $(LOCALBIN) - $(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/cmd/golangci-lint,$(GOLANGCI_LINT_VERSION)) - -.PHONY: code-generator -code-generator: $(CODEGEN) ## Download code-generator locally if necessary. -$(CODEGEN): $(LOCALBIN) - $(call go-install-tool,$(CODEGEN),k8s.io/code-generator/cmd/...,$(CODEGEN_VERSION)) - -.PHONY: generate-client -generate-client: ## Generate client SDK using code-generator - @echo "Generating client SDK..." - @mkdir -p pkg/client - @go install k8s.io/code-generator/cmd/client-gen@v0.32.1 - @go install k8s.io/code-generator/cmd/lister-gen@v0.32.1 - @go install k8s.io/code-generator/cmd/informer-gen@v0.32.1 - @go install k8s.io/code-generator/cmd/deepcopy-gen@v0.32.1 - @hack/update-codegen.sh - -# go-install-tool will 'go install' any package with custom target and name of binary, if it doesn't exist -# $1 - target path with name of binary -# $2 - package url which can be installed -# $3 - specific version of package -define go-install-tool -@[ -f "$(1)-$(3)" ] || { \ -set -e; \ -package=$(2)@$(3) ;\ -echo "Downloading $${package}" ;\ -rm -f $(1) || true ;\ -GOBIN=$(LOCALBIN) go install $${package} ;\ -mv $(1) $(1)-$(3) ;\ -} ;\ -ln -sf $(1)-$(3) $(1) -endef - -generate-helm: - rm -rf deploy - rm -rf dist - kubebuilder edit --plugins=helm/v1-alpha --force - mv dist deploy - patch -p1 < hack/helm.patch - -generate-hack-helm-patch: - kubebuilder edit --plugins=helm/v1-alpha --force - git diff 
--no-index dist deploy > hack/helm.patch || true - -generate-deploy-yaml: - helm template functionstream ./deploy/chart --set pulsar.standalone.enable=true --set createNamespace=true --namespace function-stream --create-namespace > ./scripts/deploy.yaml \ No newline at end of file diff --git a/operator/PROJECT b/operator/PROJECT deleted file mode 100644 index 1ebc9a4d..00000000 --- a/operator/PROJECT +++ /dev/null @@ -1,39 +0,0 @@ -# Code generated by tool. DO NOT EDIT. -# This file is used to track the info used to scaffold your project -# and allow the plugins properly work. -# More info: https://book.kubebuilder.io/reference/project-config.html -domain: functionstream.github.io -layout: -- go.kubebuilder.io/v4 -plugins: - helm.kubebuilder.io/v1-alpha: {} -projectName: operator -repo: github.com/FunctionStream/function-stream/operator -resources: -- api: - crdVersion: v1 - namespaced: true - controller: true - domain: functionstream.github.io - group: fs - kind: Package - path: github.com/FunctionStream/function-stream/operator/api/v1alpha1 - version: v1alpha1 - webhooks: - defaulting: true - validation: true - webhookVersion: v1 -- api: - crdVersion: v1 - namespaced: true - controller: true - domain: functionstream.github.io - group: fs - kind: Function - path: github.com/FunctionStream/function-stream/operator/api/v1alpha1 - version: v1alpha1 - webhooks: - defaulting: true - validation: true - webhookVersion: v1 -version: "3" diff --git a/operator/README.md b/operator/README.md deleted file mode 100644 index 68ca56f2..00000000 --- a/operator/README.md +++ /dev/null @@ -1,308 +0,0 @@ -# Function Stream Operator - -FunctionStream Operator is a Kubernetes operator designed to manage custom resources for serverless function -orchestration and package management on Kubernetes clusters. - -## 🚀 Get Started Now! - -**New to FunctionStream Operator?** This step-by-step tutorial will guide you through everything you need to know. - -## What is FunctionStream Operator? 
- -This project provides a Kubernetes operator that automates the lifecycle of custom resources such as Functions and -Packages. It enables users to define, deploy, and manage serverless functions and their dependencies using -Kubernetes-native APIs. The operator ensures that the desired state specified in custom resources is reflected in the -actual cluster state, supporting extensibility and integration with cloud-native workflows. - -## 📋 Prerequisites - -Before you begin, ensure you have: - -- [Helm](https://helm.sh/) v3.0+ -- Access to a Kubernetes v1.19+ cluster -- `kubectl` configured to communicate with your cluster -- cert-manager (required for TLS certificates) - -## 🛠️ Installation - -The recommended way to deploy the FunctionStream Operator is using the provided Helm chart. - -### 1. Install cert-manager - -The FunctionStream Operator requires cert-manager for TLS certificates: - -```sh -./scripts/install-cert-manager.sh -``` - -### 2. Deploy the Operator - -**Option A: With Pulsar Standalone (Recommended for testing)** -```bash -helm install fs ./deploy/chart \ - --namespace fs --create-namespace \ - --set pulsar.standalone.enable=true -``` - -**Option B: With External Pulsar Cluster** -```bash -helm install fs ./deploy/chart \ - --namespace fs --create-namespace \ - --set pulsar.serviceUrl=pulsar://your-pulsar-cluster:6650 -``` - -### 3. Verify Installation - -```bash -kubectl get pods -n fs -kubectl get crd | grep functionstream -``` - -## 📖 Next Steps - -
- -### 🎯 **Ready to deploy your first function?** - -**[📖 Complete Tutorial](TUTORIAL.md)** - Your step-by-step guide to success! - -
- -This comprehensive tutorial will teach you how to: -- ✅ Create your first package and function -- ✅ Test your deployment with real examples -- ✅ Monitor and troubleshoot issues -- ✅ Understand advanced configurations -- ✅ Follow best practices - -**Estimated time**: 15-20 minutes - -## 📁 Examples - -Ready-to-use examples are available: - -- `examples/package.yaml` - Sample package definition -- `examples/function.yaml` - Sample function that uses the package - -## 📚 Documentation - -### Getting Started -- **[📖 Complete Tutorial](TUTORIAL.md)** - Step-by-step guide with detailed explanations - -### Development -- **[🔧 Developer Guide](DEVELOPER.md)** - Information for contributors and developers - -## Configuration - -#### Pulsar Configuration - -The chart supports two modes for Pulsar: - -##### 1. Pulsar Standalone Mode - -When `pulsar.standalone.enable=true`, the chart will: - -- Deploy a Pulsar standalone StatefulSet in the same namespace -- Create persistent storage for Pulsar data and logs -- Expose Pulsar service on ports 6650 (Pulsar) and 8080 (Admin) -- Automatically configure the operator to connect to the standalone Pulsar - -```yaml -pulsar: - standalone: - enable: true - image: - repository: apachepulsar/pulsar - tag: "3.4.0" - resources: - limits: - cpu: 1000m - memory: 2Gi - requests: - cpu: 500m - memory: 1Gi - storage: - size: 10Gi - storageClass: "" # Use default storage class if empty - service: - type: ClusterIP - ports: - pulsar: 6650 - admin: 8080 -``` - -##### 2. 
External Pulsar Mode - -When `pulsar.standalone.enable=false` (default), you can specify an external Pulsar cluster: - -```yaml -pulsar: - serviceUrl: pulsar://your-pulsar-cluster:6650 - authPlugin: "" # Optional: Pulsar authentication plugin - authParams: "" # Optional: Pulsar authentication parameters -``` - -#### Manager Configuration - -```yaml -controllerManager: - replicas: 1 - container: - image: - repository: functionstream/operator - tag: latest - resources: - limits: - cpu: 500m - memory: 128Mi - requests: - cpu: 10m - memory: 64Mi -``` - -#### Other Features - -- **RBAC**: Enable/disable RBAC permissions -- **CRDs**: Control CRD installation and retention -- **Metrics**: Enable metrics export -- **Webhooks**: Enable admission webhooks -- **Prometheus**: Enable ServiceMonitor for Prometheus -- **Cert-Manager**: Enable cert-manager integration -- **Network Policies**: Enable network policies - -### Accessing Pulsar - -#### When Using Pulsar Standalone - -The Pulsar standalone service is exposed as: - -- **Pulsar Service**: `pulsar-standalone:6650` -- **Admin Interface**: `pulsar-standalone:8080` - -You can access the admin interface by port-forwarding: - -```bash -kubectl port-forward svc/pulsar-standalone 8080:8080 -``` - -Then visit `http://localhost:8080` in your browser. - -#### Pulsar Client Configuration - -When using Pulsar standalone, your Pulsar clients should connect to: - -``` -pulsar://pulsar-standalone:6650 -``` - -### Storage - -When Pulsar standalone is enabled, the chart creates two PersistentVolumeClaims: - -- `pulsar-data`: For Pulsar data storage -- `pulsar-logs`: For Pulsar logs storage - -Both use the same storage size and storage class configuration. 
- -### Troubleshooting - -#### Certificate Mounting Issues - -If you encounter errors like: - -``` -Warning FailedMount 95s (x9 over 3m43s) kubelet MountVolume.SetUp failed for volume "metrics-certs" : secret "metrics-server-cert" not found -Warning FailedMount 95s (x9 over 3m43s) kubelet MountVolume.SetUp failed for volume "webhook-cert" : secret "webhook-server-cert" not found -``` - -This happens because cert-manager is not installed or not running in your cluster. The operator requires cert-manager to -create TLS certificates for webhooks and metrics. - -**Solution:** - -1. **Verify cert-manager installation:** - ```bash - kubectl get pods -n cert-manager - ``` - All cert-manager pods should be in `Running` status. - -2. **Check cert-manager namespace exists:** - ```bash - kubectl get namespace cert-manager - ``` - -3. **If cert-manager is not installed, install it:** - ```bash - # Using the provided script - chmod +x scripts/install-cert-manager.sh - ./scripts/install-cert-manager.sh - - # Or manually - kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.13.0/cert-manager.yaml - ``` - -4. **Wait for cert-manager to be ready:** - ```bash - kubectl wait --for=jsonpath='{.status.phase}=Running' pods -l app.kubernetes.io/instance=cert-manager -n cert-manager --timeout=300s - ``` - -5. 
**Reinstall the operator after cert-manager is ready:** - ```bash - helm uninstall fs -n fs - helm install fs ./deploy/chart --namespace fs --create-namespace - ``` - -#### Check cert-manager Status - -To verify that cert-manager is working correctly: - -```bash -# Check cert-manager pods -kubectl get pods -n cert-manager - -# Check cert-manager CRDs -kubectl get crd | grep cert-manager - -# Check cert-manager logs -kubectl logs -n cert-manager -l app.kubernetes.io/name=cert-manager -``` - -### Upgrading - -To upgrade the operator after making changes or pulling a new chart version: - -```sh -helm upgrade fs ./deploy/chart \ - --namespace fs -``` - -### Uninstallation - -To uninstall the operator and all associated resources: - -```bash -helm uninstall fs -n fs -``` - -**Note**: - -- By default, CRDs are deleted during uninstall. If you want to retain CRDs after uninstall, set `crd.keep: true` in - your values file. Be aware that retaining CRDs will also prevent the deletion of any custom resources (Functions, - Packages, etc.) that depend on these CRDs. -- If you enabled Pulsar standalone, the persistent volumes will remain unless you manually delete them. - -## License - -Copyright 2025. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
\ No newline at end of file diff --git a/operator/TUTORIAL.md b/operator/TUTORIAL.md deleted file mode 100644 index 842f2e50..00000000 --- a/operator/TUTORIAL.md +++ /dev/null @@ -1,314 +0,0 @@ -# FunctionStream Operator Tutorial - -Welcome to the FunctionStream Operator tutorial! This guide will walk you through creating and deploying your first serverless function using the FunctionStream Operator on Kubernetes. - -## Overview - -FunctionStream Operator is a Kubernetes operator that manages custom resources for serverless function orchestration and package management. In this tutorial, you'll learn how to: - -- Deploy a Package resource that defines a reusable function module -- Deploy a Function resource that instantiates the package -- Monitor and manage your deployed functions -- Understand the architecture and components - -## Prerequisites - -Before you begin, ensure you have: - -- A Kubernetes cluster (v1.19+) with kubectl configured -- FunctionStream Operator installed (see [Installation Guide](README.md)) -- Basic understanding of Kubernetes concepts - -Follow the [Installation Guide](README.md) to set up the FunctionStream Operator if you haven't done so already. - -## Step 1: Verify Installation - -First, let's verify that the FunctionStream Operator is properly installed: - -```bash -# Check if the operator namespace exists -kubectl get namespace fs - -# Verify operator pods are running -kubectl get pods -n fs - -# Check that Custom Resource Definitions are installed -kubectl get crd | grep functionstream -``` - -Expected output: -``` -NAME READY STATUS RESTARTS AGE -fs-pulsar-standalone-0 1/1 Running 1 21h -operator-controller-manager-c99489d8b-zk78h 1/1 Running 0 21h - -NAME CREATED AT -functions.fs.functionstream.github.io 2025-06-23T14:53:30Z -packages.fs.functionstream.github.io 2025-06-23T14:53:30Z -``` - -## Step 2: Create Your First Package - -A Package defines a reusable function module with its container image and available functions. 
Let's create a simple "current time" package: - -```yaml -# examples/package.yaml -apiVersion: fs.functionstream.github.io/v1alpha1 -kind: Package -metadata: - name: current-time -spec: - displayName: Get Current Time - logo: "" - description: "A function for getting the current time." - functionType: - cloud: - image: "functionstream/time-function:latest" - modules: - getCurrentTime: - displayName: Get Current Time - description: "A tool that returns the current time." -``` - -### Package Components Explained - -- **`displayName`**: Human-readable name for the package -- **`description`**: Detailed description of what the package does -- **`functionType.cloud.image`**: Docker image containing the function code -- **`modules`**: Available functions within the package - - Each module has a unique key (e.g., `getCurrentTime`) - - Modules can have their own display names and descriptions - -### Deploy the Package - -```bash -kubectl apply -f examples/package.yaml -``` - -Verify the package was created: - -```bash -kubectl get packages -kubectl describe package current-time -``` - -Expected output: -``` -NAME AGE -current-time 21h - -Name: current-time -Namespace: default -Spec: - Description: A function for getting the current time. - Display Name: Get Current Time - Function Type: - Cloud: - Image: functionstream/time-function:latest - Modules: - Get Current Time: - Description: A tool that returns the current time. - Display Name: Get Current Time -``` - -## Step 3: Create Your First Function - -A Function instantiates a package with specific configuration and request sources. 
Let's create a function that uses our current-time package: - -```yaml -# examples/function.yaml -apiVersion: fs.functionstream.github.io/v1alpha1 -kind: Function -metadata: - name: current-time-function -spec: - displayName: Get Current Time Function - package: current-time - module: getCurrentTime - requestSource: # RPC - pulsar: - topic: request_current_time - source: - pulsar: - topic: current_time_source - sink: - pulsar: - topic: current_time_sink -``` - -### Function Components Explained - -- **`package`**: References the package name to instantiate -- **`module`**: Specifies which module from the package to use -- **`requestSource.pulsar.topic`**: Pulsar topic that triggers the function -- **`displayName`**: Human-readable name for the function instance - -### Deploy the Function - -```bash -kubectl apply -f examples/function.yaml -``` - -Verify the function was created: - -```bash -kubectl get functions -kubectl describe function current-time-function -``` - -Expected output: -``` -NAME AGE -current-time-function 21h - -Name: current-time-function -Namespace: default -Labels: package=current-time -Spec: - Display Name: Get Current Time Function - Module: getCurrentTime - Package: current-time - Request Source: - Pulsar: - Topic: request_current_time -Status: - Available Replicas: 1 - Ready Replicas: 1 - Replicas: 1 - Updated Replicas: 1 -``` - -## Step 4: Monitor Function Deployment - -The operator automatically creates Kubernetes resources to run your function. 
Let's check what was created: - -```bash -# Check the function pod -kubectl get pods -l function=current-time-function - -# Check the deployment -kubectl get deployments -l function=current-time-function -``` - -Expected output: -``` -NAME READY STATUS RESTARTS AGE -function-current-time-function-b8b89f856-brvx7 1/1 Running 0 21h - -NAME READY UP-TO-DATE AVAILABLE AGE -function-current-time-function 1/1 1 1 21h -``` - -## Step 5: Test Your Function - -Now let's test the function by sending a message to the Pulsar topic. First, let's access Pulsar: - -```bash -# Port forward Pulsar service -kubectl port-forward svc/fs-pulsar-standalone 6650:6650 -n fs & -kubectl port-forward svc/fs-pulsar-standalone 8080:8080 -n fs & -``` - -### Using Pulsar Admin Interface - -1. Open your browser and navigate to `http://localhost:8080` -2. You'll see the Pulsar admin interface -3. Navigate to "Topics" to see the `request_current_time` topic - -### Using Pulsar Client - -You can test the function by shelling into the Pulsar standalone pod: - -```bash -# Shell into the Pulsar standalone pod -kubectl exec -it fs-pulsar-standalone-0 -n fs -- bash -``` - -**1. Start a consumer in a separate terminal window** - -Open a new terminal window and shell into the Pulsar pod: - -```bash -kubectl exec -it fs-pulsar-standalone-0 -n fs -- bash -``` - -Then start consuming messages from the function output topic: - -```bash -# Start consuming messages from the function output topic -pulsar-client consume current_time_sink -s "test-subscription" -``` - -This will start listening for messages from the function's output topic. - -**2. 
Send a test message in another terminal window** - -In your original terminal window (or another terminal), shell into the Pulsar pod and send a test message: - -```bash -kubectl exec -it fs-pulsar-standalone-0 -n fs -- bash -``` - -Then send a test message to trigger the function: - -```bash -# Send a test message to trigger the function -pulsar-client produce request_current_time -m "{}" -``` - -You should see the function process the message and output the current time to the `current_time_sink` topic, which will appear in your consumer window. - -``` -publishTime:[1750775397910], eventTime:[1750775397907], key:[null], properties:[], content:{"result": "The current time is 2025-06-24 14:29:57 ."} -``` - -## Step 6: Cleanup - -When you're done testing, clean up the resources: - -```bash -# Delete the function -kubectl delete function current-time-function - -# Delete the package -kubectl delete package current-time - -# Verify cleanup -kubectl get packages -kubectl get functions -kubectl get pods -l function=current-time-function -``` - -## Troubleshooting - -### Common Issues - -1. **Package Not Found** - ``` - Error: package "current-time" not found - ``` - **Solution**: Ensure the package is created before the function - -2. **Image Pull Errors** - ``` - Error: ImagePullBackOff - ``` - **Solution**: Check if the container image exists and is accessible - -3. **Pulsar Connection Issues** - ``` - Error: Failed to connect to Pulsar - ``` - **Solution**: Verify Pulsar is running and accessible - -### Debug Commands - -```bash -# Check operator logs -kubectl logs -n fs -l app.kubernetes.io/name=operator - -# Check function pod events -kubectl describe pod -l function=current-time-function -``` - -Congratulations! You've successfully deployed your first serverless function using FunctionStream Operator. The operator handled all the complexity of managing Kubernetes resources, scaling, and integration with Pulsar, allowing you to focus on your function logic. 
\ No newline at end of file diff --git a/operator/api/v1alpha1/function_types.go b/operator/api/v1alpha1/function_types.go deleted file mode 100644 index 7546ea6e..00000000 --- a/operator/api/v1alpha1/function_types.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright 2025. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// PackageRef defines a reference to a Package resource -// +kubebuilder:object:generate=true -// +kubebuilder:validation:Optional -type PackageRef struct { - // Name of the Package resource - // +kubebuilder:validation:Required - Name string `json:"name"` - // Namespace of the Package resource - // +kubebuilder:validation:Optional - Namespace string `json:"namespace,omitempty"` -} - -// FunctionSpec defines the desired state of Function -// +kubebuilder:object:generate=true -// +kubebuilder:validation:Optional -type FunctionSpec struct { - // Display name of the function - // +kubebuilder:validation:Optional - DisplayName string `json:"displayName,omitempty"` - // Description of the function - // +kubebuilder:validation:Optional - Description string `json:"description,omitempty"` - // Package reference - // +kubebuilder:validation:Required - PackageRef PackageRef `json:"packageRef"` - // Module name - // +kubebuilder:validation:Required - Module string `json:"module"` - // Number of replicas for the function deployment - 
// +kubebuilder:validation:Optional - // +kubebuilder:default=1 - Replicas *int32 `json:"replicas,omitempty"` - // +kubebuilder:validation:Optional - SubscriptionName string `json:"subscriptionName,omitempty"` - // List of sources - // +kubebuilder:validation:Optional - Sources []SourceSpec `json:"sources,omitempty"` - // Request source - // +kubebuilder:validation:Optional - RequestSource *SourceSpec `json:"requestSource,omitempty"` - // Sink specifies the sink configuration - // +kubebuilder:validation:Optional - Sink *SinkSpec `json:"sink,omitempty"` - // Configurations as key-value pairs - // +kubebuilder:validation:Optional - Config map[string]v1.JSON `json:"config,omitempty"` -} - -// SourceSpec defines a source or sink specification -// +kubebuilder:object:generate=true -// +kubebuilder:validation:Optional -type SourceSpec struct { - // Pulsar source specification - // +kubebuilder:validation:Optional - Pulsar *PulsarSourceSpec `json:"pulsar,omitempty"` -} - -// PulsarSourceSpec defines the Pulsar source details -// +kubebuilder:object:generate=true -// +kubebuilder:validation:Optional -type PulsarSourceSpec struct { - // Topic name - // +kubebuilder:validation:Required - Topic string `json:"topic"` -} - -// SinkSpec defines a sink specification -// +kubebuilder:object:generate=true -// +kubebuilder:validation:Optional -type SinkSpec struct { - // Pulsar sink specification - // +kubebuilder:validation:Optional - Pulsar *PulsarSinkSpec `json:"pulsar,omitempty"` -} - -// PulsarSinkSpec defines the Pulsar sink details -// +kubebuilder:object:generate=true -// +kubebuilder:validation:Optional -type PulsarSinkSpec struct { - // Topic name - // +kubebuilder:validation:Required - Topic string `json:"topic"` -} - -// FunctionStatus defines the observed state of Function -type FunctionStatus struct { - // Number of available pods (ready for at least minReadySeconds) - AvailableReplicas int32 `json:"availableReplicas,omitempty"` - // Total number of ready pods - 
ReadyReplicas int32 `json:"readyReplicas,omitempty"` - // Total number of non-terminated pods targeted by this deployment - Replicas int32 `json:"replicas,omitempty"` - // Total number of updated pods - UpdatedReplicas int32 `json:"updatedReplicas,omitempty"` - // Most recent generation observed for this Function - ObservedGeneration int64 `json:"observedGeneration,omitempty"` -} - -// +kubebuilder:object:root=true -// +kubebuilder:subresource:status - -// Function is the Schema for the functions API. -type Function struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec FunctionSpec `json:"spec,omitempty"` - Status FunctionStatus `json:"status,omitempty"` -} - -// +kubebuilder:object:root=true - -// FunctionList contains a list of Function. -type FunctionList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []Function `json:"items"` -} - -func init() { - SchemeBuilder.Register(&Function{}, &FunctionList{}) -} diff --git a/operator/api/v1alpha1/groupversion_info.go b/operator/api/v1alpha1/groupversion_info.go deleted file mode 100644 index fc3042f1..00000000 --- a/operator/api/v1alpha1/groupversion_info.go +++ /dev/null @@ -1,36 +0,0 @@ -/* -Copyright 2025. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package v1alpha1 contains API Schema definitions for the fs v1alpha1 API group. 
-// +kubebuilder:object:generate=true -// +groupName=fs.functionstream.github.io -package v1alpha1 - -import ( - "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/controller-runtime/pkg/scheme" -) - -var ( - // GroupVersion is group version used to register these objects. - GroupVersion = schema.GroupVersion{Group: "fs.functionstream.github.io", Version: "v1alpha1"} - - // SchemeBuilder is used to add go types to the GroupVersionKind scheme. - SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} - - // AddToScheme adds the types in this group-version to the given scheme. - AddToScheme = SchemeBuilder.AddToScheme -) diff --git a/operator/api/v1alpha1/packages_types.go b/operator/api/v1alpha1/packages_types.go deleted file mode 100644 index c51b3c47..00000000 --- a/operator/api/v1alpha1/packages_types.go +++ /dev/null @@ -1,118 +0,0 @@ -/* -Copyright 2025. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// ConfigItem defines a configuration item for a module -type ConfigItem struct { - // DisplayName is the human-readable name of the config item - // +kubebuilder:validation:Optional - DisplayName string `json:"displayName,omitempty"` - // Description provides additional information about the config item - // +kubebuilder:validation:Optional - Description string `json:"description,omitempty"` - // Type specifies the data type of the config item - // +kubebuilder:validation:Optional - Type string `json:"type,omitempty"` - // Required indicates whether this config item is mandatory - // +kubebuilder:validation:Optional - Required bool `json:"required,omitempty"` -} - -// Module defines a module within a package -type Module struct { - // DisplayName is the human-readable name of the module - // +kubebuilder:validation:Optional - DisplayName string `json:"displayName,omitempty"` - // Description provides additional information about the module - // +kubebuilder:validation:Optional - Description string `json:"description,omitempty"` - // SourceSchema defines the input schema for the module - // +kubebuilder:validation:Optional - SourceSchema string `json:"sourceSchema,omitempty"` - // SinkSchema defines the output schema for the module - // +kubebuilder:validation:Optional - SinkSchema string `json:"sinkSchema,omitempty"` - // Config is a list of configuration items for the module - // +kubebuilder:validation:Optional - Config map[string]ConfigItem `json:"config,omitempty"` -} - -// CloudType defines cloud function package configuration -type CloudType struct { - // Image specifies the container image for cloud deployment - Image string `json:"image"` -} - -// FunctionType defines the function type configuration -type FunctionType struct { - // Cloud contains cloud function package configuration - // +kubebuilder:validation:Optional - Cloud *CloudType `json:"cloud,omitempty"` -} - 
-// PackageSpec defines the desired state of Package -type PackageSpec struct { - // DisplayName is the human-readable name of the package - // +kubebuilder:validation:Optional - DisplayName string `json:"displayName,omitempty"` - // Logo is the URL or base64 encoded image for the package logo - // +kubebuilder:validation:Optional - Logo string `json:"logo,omitempty"` - // Description provides additional information about the package - // +kubebuilder:validation:Optional - Description string `json:"description,omitempty"` - // FunctionType contains function type configuration - FunctionType FunctionType `json:"functionType"` - // Modules is a map of module names to their configurations - Modules map[string]Module `json:"modules"` -} - -// PackageStatus defines the observed state of Package. -type PackageStatus struct { - // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster - // Important: Run "make" to regenerate code after modifying this file -} - -// +kubebuilder:object:root=true -// +kubebuilder:subresource:status -// +kubebuilder:resource:path=packages,scope=Namespaced,singular=package,shortName=pkg - -// Package is the Schema for the packages API. -type Package struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec PackageSpec `json:"spec,omitempty"` - Status PackageStatus `json:"status,omitempty"` -} - -// +kubebuilder:object:root=true - -// PackageList contains a list of Package. -type PackageList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []Package `json:"items"` -} - -func init() { - SchemeBuilder.Register(&Package{}, &PackageList{}) -} diff --git a/operator/api/v1alpha1/zz_generated.deepcopy.go b/operator/api/v1alpha1/zz_generated.deepcopy.go deleted file mode 100644 index e86d3102..00000000 --- a/operator/api/v1alpha1/zz_generated.deepcopy.go +++ /dev/null @@ -1,399 +0,0 @@ -//go:build !ignore_autogenerated - -/* -Copyright 2025. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by controller-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CloudType) DeepCopyInto(out *CloudType) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudType. -func (in *CloudType) DeepCopy() *CloudType { - if in == nil { - return nil - } - out := new(CloudType) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConfigItem) DeepCopyInto(out *ConfigItem) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigItem. -func (in *ConfigItem) DeepCopy() *ConfigItem { - if in == nil { - return nil - } - out := new(ConfigItem) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Function) DeepCopyInto(out *Function) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Function. -func (in *Function) DeepCopy() *Function { - if in == nil { - return nil - } - out := new(Function) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Function) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FunctionList) DeepCopyInto(out *FunctionList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Function, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionList. -func (in *FunctionList) DeepCopy() *FunctionList { - if in == nil { - return nil - } - out := new(FunctionList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *FunctionList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *FunctionSpec) DeepCopyInto(out *FunctionSpec) { - *out = *in - out.PackageRef = in.PackageRef - if in.Replicas != nil { - in, out := &in.Replicas, &out.Replicas - *out = new(int32) - **out = **in - } - if in.Sources != nil { - in, out := &in.Sources, &out.Sources - *out = make([]SourceSpec, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.RequestSource != nil { - in, out := &in.RequestSource, &out.RequestSource - *out = new(SourceSpec) - (*in).DeepCopyInto(*out) - } - if in.Sink != nil { - in, out := &in.Sink, &out.Sink - *out = new(SinkSpec) - (*in).DeepCopyInto(*out) - } - if in.Config != nil { - in, out := &in.Config, &out.Config - *out = make(map[string]v1.JSON, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionSpec. -func (in *FunctionSpec) DeepCopy() *FunctionSpec { - if in == nil { - return nil - } - out := new(FunctionSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FunctionStatus) DeepCopyInto(out *FunctionStatus) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionStatus. -func (in *FunctionStatus) DeepCopy() *FunctionStatus { - if in == nil { - return nil - } - out := new(FunctionStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FunctionType) DeepCopyInto(out *FunctionType) { - *out = *in - if in.Cloud != nil { - in, out := &in.Cloud, &out.Cloud - *out = new(CloudType) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionType. 
-func (in *FunctionType) DeepCopy() *FunctionType { - if in == nil { - return nil - } - out := new(FunctionType) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Module) DeepCopyInto(out *Module) { - *out = *in - if in.Config != nil { - in, out := &in.Config, &out.Config - *out = make(map[string]ConfigItem, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Module. -func (in *Module) DeepCopy() *Module { - if in == nil { - return nil - } - out := new(Module) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Package) DeepCopyInto(out *Package) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Package. -func (in *Package) DeepCopy() *Package { - if in == nil { - return nil - } - out := new(Package) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Package) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PackageList) DeepCopyInto(out *PackageList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Package, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PackageList. -func (in *PackageList) DeepCopy() *PackageList { - if in == nil { - return nil - } - out := new(PackageList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PackageList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PackageRef) DeepCopyInto(out *PackageRef) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PackageRef. -func (in *PackageRef) DeepCopy() *PackageRef { - if in == nil { - return nil - } - out := new(PackageRef) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PackageSpec) DeepCopyInto(out *PackageSpec) { - *out = *in - in.FunctionType.DeepCopyInto(&out.FunctionType) - if in.Modules != nil { - in, out := &in.Modules, &out.Modules - *out = make(map[string]Module, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PackageSpec. 
-func (in *PackageSpec) DeepCopy() *PackageSpec { - if in == nil { - return nil - } - out := new(PackageSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PackageStatus) DeepCopyInto(out *PackageStatus) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PackageStatus. -func (in *PackageStatus) DeepCopy() *PackageStatus { - if in == nil { - return nil - } - out := new(PackageStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PulsarSinkSpec) DeepCopyInto(out *PulsarSinkSpec) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PulsarSinkSpec. -func (in *PulsarSinkSpec) DeepCopy() *PulsarSinkSpec { - if in == nil { - return nil - } - out := new(PulsarSinkSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PulsarSourceSpec) DeepCopyInto(out *PulsarSourceSpec) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PulsarSourceSpec. -func (in *PulsarSourceSpec) DeepCopy() *PulsarSourceSpec { - if in == nil { - return nil - } - out := new(PulsarSourceSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SinkSpec) DeepCopyInto(out *SinkSpec) { - *out = *in - if in.Pulsar != nil { - in, out := &in.Pulsar, &out.Pulsar - *out = new(PulsarSinkSpec) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SinkSpec. 
-func (in *SinkSpec) DeepCopy() *SinkSpec { - if in == nil { - return nil - } - out := new(SinkSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SourceSpec) DeepCopyInto(out *SourceSpec) { - *out = *in - if in.Pulsar != nil { - in, out := &in.Pulsar, &out.Pulsar - *out = new(PulsarSourceSpec) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceSpec. -func (in *SourceSpec) DeepCopy() *SourceSpec { - if in == nil { - return nil - } - out := new(SourceSpec) - in.DeepCopyInto(out) - return out -} diff --git a/operator/cmd/main.go b/operator/cmd/main.go deleted file mode 100644 index 5a9f0644..00000000 --- a/operator/cmd/main.go +++ /dev/null @@ -1,280 +0,0 @@ -/* -Copyright 2025. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -import ( - "crypto/tls" - "flag" - "os" - "path/filepath" - - // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) - // to ensure that exec-entrypoint and run can make use of them. 
- _ "k8s.io/client-go/plugin/pkg/client/auth" - - "k8s.io/apimachinery/pkg/runtime" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - clientgoscheme "k8s.io/client-go/kubernetes/scheme" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/certwatcher" - "sigs.k8s.io/controller-runtime/pkg/healthz" - "sigs.k8s.io/controller-runtime/pkg/log/zap" - "sigs.k8s.io/controller-runtime/pkg/metrics/filters" - metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" - "sigs.k8s.io/controller-runtime/pkg/webhook" - - fsv1alpha1 "github.com/FunctionStream/function-stream/operator/api/v1alpha1" - "github.com/FunctionStream/function-stream/operator/internal/controller" - webhookfsv1alpha1 "github.com/FunctionStream/function-stream/operator/internal/webhook/v1alpha1" - // +kubebuilder:scaffold:imports -) - -var ( - scheme = runtime.NewScheme() - setupLog = ctrl.Log.WithName("setup") -) - -func init() { - utilruntime.Must(clientgoscheme.AddToScheme(scheme)) - - utilruntime.Must(fsv1alpha1.AddToScheme(scheme)) - // +kubebuilder:scaffold:scheme -} - -// nolint:gocyclo -func main() { - var metricsAddr string - var metricsCertPath, metricsCertName, metricsCertKey string - var webhookCertPath, webhookCertName, webhookCertKey string - var enableLeaderElection bool - var probeAddr string - var secureMetrics bool - var enableHTTP2 bool - var pulsarServiceUrl string - var pulsarAuthPlugin string - var pulsarAuthParams string - var tlsOpts []func(*tls.Config) - flag.StringVar(&metricsAddr, "metrics-bind-address", "0", "The address the metrics endpoint binds to. "+ - "Use :8443 for HTTPS or :8080 for HTTP, or leave as 0 to disable the metrics service.") - flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") - flag.BoolVar(&enableLeaderElection, "leader-elect", false, - "Enable leader election for controller manager. 
"+ - "Enabling this will ensure there is only one active controller manager.") - flag.BoolVar(&secureMetrics, "metrics-secure", true, - "If set, the metrics endpoint is served securely via HTTPS. Use --metrics-secure=false to use HTTP instead.") - flag.StringVar(&webhookCertPath, "webhook-cert-path", "", "The directory that contains the webhook certificate.") - flag.StringVar(&webhookCertName, "webhook-cert-name", "tls.crt", "The name of the webhook certificate file.") - flag.StringVar(&webhookCertKey, "webhook-cert-key", "tls.key", "The name of the webhook key file.") - flag.StringVar(&metricsCertPath, "metrics-cert-path", "", - "The directory that contains the metrics server certificate.") - flag.StringVar(&metricsCertName, "metrics-cert-name", "tls.crt", "The name of the metrics server certificate file.") - flag.StringVar(&metricsCertKey, "metrics-cert-key", "tls.key", "The name of the metrics server key file.") - flag.BoolVar(&enableHTTP2, "enable-http2", false, - "If set, HTTP/2 will be enabled for the metrics and webhook servers") - // Pulsar CLI flags - flag.StringVar(&pulsarServiceUrl, "pulsar-service-url", os.Getenv("PULSAR_SERVICE_URL"), "Pulsar service URL") - flag.StringVar(&pulsarAuthPlugin, "pulsar-auth-plugin", os.Getenv("PULSAR_AUTH_PLUGIN"), "Pulsar auth plugin") - flag.StringVar(&pulsarAuthParams, "pulsar-auth-params", os.Getenv("PULSAR_AUTH_PARAMS"), "Pulsar auth params") - opts := zap.Options{ - Development: true, - } - opts.BindFlags(flag.CommandLine) - flag.Parse() - - config := controller.Config{ - PulsarServiceURL: pulsarServiceUrl, - PulsarAuthPlugin: pulsarAuthPlugin, - PulsarAuthParams: pulsarAuthParams, - } - - ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts))) - - // if the enable-http2 flag is false (the default), http/2 should be disabled - // due to its vulnerabilities. More specifically, disabling http/2 will - // prevent from being vulnerable to the HTTP/2 Stream Cancellation and - // Rapid Reset CVEs. 
For more information see: - // - https://github.com/advisories/GHSA-qppj-fm5r-hxr3 - // - https://github.com/advisories/GHSA-4374-p667-p6c8 - disableHTTP2 := func(c *tls.Config) { - setupLog.Info("disabling http/2") - c.NextProtos = []string{"http/1.1"} - } - - if !enableHTTP2 { - tlsOpts = append(tlsOpts, disableHTTP2) - } - - // Create watchers for metrics and webhooks certificates - var metricsCertWatcher, webhookCertWatcher *certwatcher.CertWatcher - - // Initial webhook TLS options - webhookTLSOpts := tlsOpts - - if len(webhookCertPath) > 0 { - setupLog.Info("Initializing webhook certificate watcher using provided certificates", - "webhook-cert-path", webhookCertPath, "webhook-cert-name", webhookCertName, "webhook-cert-key", webhookCertKey) - - var err error - webhookCertWatcher, err = certwatcher.New( - filepath.Join(webhookCertPath, webhookCertName), - filepath.Join(webhookCertPath, webhookCertKey), - ) - if err != nil { - setupLog.Error(err, "Failed to initialize webhook certificate watcher") - os.Exit(1) - } - - webhookTLSOpts = append(webhookTLSOpts, func(config *tls.Config) { - config.GetCertificate = webhookCertWatcher.GetCertificate - }) - } - - webhookServer := webhook.NewServer(webhook.Options{ - TLSOpts: webhookTLSOpts, - }) - - // Metrics endpoint is enabled in 'config/default/kustomization.yaml'. The Metrics options configure the server. - // More info: - // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.20.4/pkg/metrics/server - // - https://book.kubebuilder.io/reference/metrics.html - metricsServerOptions := metricsserver.Options{ - BindAddress: metricsAddr, - SecureServing: secureMetrics, - TLSOpts: tlsOpts, - } - - if secureMetrics { - // FilterProvider is used to protect the metrics endpoint with authn/authz. - // These configurations ensure that only authorized users and service accounts - // can access the metrics endpoint. The RBAC are configured in 'config/rbac/kustomization.yaml'. 
More info: - // https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.20.4/pkg/metrics/filters#WithAuthenticationAndAuthorization - metricsServerOptions.FilterProvider = filters.WithAuthenticationAndAuthorization - } - - // If the certificate is not specified, controller-runtime will automatically - // generate self-signed certificates for the metrics server. While convenient for development and testing, - // this setup is not recommended for production. - // - // TODO(user): If you enable certManager, uncomment the following lines: - // - [METRICS-WITH-CERTS] at config/default/kustomization.yaml to generate and use certificates - // managed by cert-manager for the metrics server. - // - [PROMETHEUS-WITH-CERTS] at config/prometheus/kustomization.yaml for TLS certification. - if len(metricsCertPath) > 0 { - setupLog.Info("Initializing metrics certificate watcher using provided certificates", - "metrics-cert-path", metricsCertPath, "metrics-cert-name", metricsCertName, "metrics-cert-key", metricsCertKey) - - var err error - metricsCertWatcher, err = certwatcher.New( - filepath.Join(metricsCertPath, metricsCertName), - filepath.Join(metricsCertPath, metricsCertKey), - ) - if err != nil { - setupLog.Error(err, "Failed to initialize metrics certificate watcher", "error", err) - os.Exit(1) - } - - metricsServerOptions.TLSOpts = append(metricsServerOptions.TLSOpts, func(config *tls.Config) { - config.GetCertificate = metricsCertWatcher.GetCertificate - }) - } - - mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ - Scheme: scheme, - Metrics: metricsServerOptions, - WebhookServer: webhookServer, - HealthProbeBindAddress: probeAddr, - LeaderElection: enableLeaderElection, - LeaderElectionID: "2da2b91f.functionstream.github.io", - // LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily - // when the Manager ends. This requires the binary to immediately end when the - // Manager is stopped, otherwise, this setting is unsafe. 
Setting this significantly - // speeds up voluntary leader transitions as the new leader don't have to wait - // LeaseDuration time first. - // - // In the default scaffold provided, the program ends immediately after - // the manager stops, so would be fine to enable this option. However, - // if you are doing or is intended to do any operation such as perform cleanups - // after the manager stops then its usage might be unsafe. - // LeaderElectionReleaseOnCancel: true, - }) - if err != nil { - setupLog.Error(err, "unable to start manager") - os.Exit(1) - } - - if err = (&controller.PackagesReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "Package") - os.Exit(1) - } - if err = (&controller.FunctionReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Config: config, - }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "Function") - os.Exit(1) - } - // nolint:goconst - if os.Getenv("ENABLE_WEBHOOKS") != "false" { - if err = webhookfsv1alpha1.SetupFunctionWebhookWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create webhook", "webhook", "Function") - os.Exit(1) - } - } - // nolint:goconst - if os.Getenv("ENABLE_WEBHOOKS") != "false" { - if err = webhookfsv1alpha1.SetupPackagesWebhookWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create webhook", "webhook", "Package") - os.Exit(1) - } - } - // +kubebuilder:scaffold:builder - - if metricsCertWatcher != nil { - setupLog.Info("Adding metrics certificate watcher to manager") - if err := mgr.Add(metricsCertWatcher); err != nil { - setupLog.Error(err, "unable to add metrics certificate watcher to manager") - os.Exit(1) - } - } - - if webhookCertWatcher != nil { - setupLog.Info("Adding webhook certificate watcher to manager") - if err := mgr.Add(webhookCertWatcher); err != nil { - setupLog.Error(err, 
"unable to add webhook certificate watcher to manager") - os.Exit(1) - } - } - - if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { - setupLog.Error(err, "unable to set up health check") - os.Exit(1) - } - if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { - setupLog.Error(err, "unable to set up ready check") - os.Exit(1) - } - - setupLog.Info("starting manager") - if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { - setupLog.Error(err, "problem running manager") - os.Exit(1) - } -} diff --git a/operator/config/certmanager/certificate-metrics.yaml b/operator/config/certmanager/certificate-metrics.yaml deleted file mode 100644 index 1125de29..00000000 --- a/operator/config/certmanager/certificate-metrics.yaml +++ /dev/null @@ -1,20 +0,0 @@ -# The following manifests contain a self-signed issuer CR and a metrics certificate CR. -# More document can be found at https://docs.cert-manager.io -apiVersion: cert-manager.io/v1 -kind: Certificate -metadata: - labels: - app.kubernetes.io/name: operator - app.kubernetes.io/managed-by: kustomize - name: metrics-certs # this name should match the one appeared in kustomizeconfig.yaml - namespace: system -spec: - dnsNames: - # SERVICE_NAME and SERVICE_NAMESPACE will be substituted by kustomize - # replacements in the config/default/kustomization.yaml file. - - SERVICE_NAME.SERVICE_NAMESPACE.svc - - SERVICE_NAME.SERVICE_NAMESPACE.svc.cluster.local - issuerRef: - kind: Issuer - name: selfsigned-issuer - secretName: metrics-server-cert diff --git a/operator/config/certmanager/certificate-webhook.yaml b/operator/config/certmanager/certificate-webhook.yaml deleted file mode 100644 index a839fa0e..00000000 --- a/operator/config/certmanager/certificate-webhook.yaml +++ /dev/null @@ -1,20 +0,0 @@ -# The following manifests contain a self-signed issuer CR and a certificate CR. 
-# More document can be found at https://docs.cert-manager.io -apiVersion: cert-manager.io/v1 -kind: Certificate -metadata: - labels: - app.kubernetes.io/name: operator - app.kubernetes.io/managed-by: kustomize - name: serving-cert # this name should match the one appeared in kustomizeconfig.yaml - namespace: system -spec: - # SERVICE_NAME and SERVICE_NAMESPACE will be substituted by kustomize - # replacements in the config/default/kustomization.yaml file. - dnsNames: - - SERVICE_NAME.SERVICE_NAMESPACE.svc - - SERVICE_NAME.SERVICE_NAMESPACE.svc.cluster.local - issuerRef: - kind: Issuer - name: selfsigned-issuer - secretName: webhook-server-cert diff --git a/operator/config/certmanager/issuer.yaml b/operator/config/certmanager/issuer.yaml deleted file mode 100644 index 82ee162c..00000000 --- a/operator/config/certmanager/issuer.yaml +++ /dev/null @@ -1,13 +0,0 @@ -# The following manifest contains a self-signed issuer CR. -# More information can be found at https://docs.cert-manager.io -# WARNING: Targets CertManager v1.0. Check https://cert-manager.io/docs/installation/upgrading/ for breaking changes. 
-apiVersion: cert-manager.io/v1 -kind: Issuer -metadata: - labels: - app.kubernetes.io/name: operator - app.kubernetes.io/managed-by: kustomize - name: selfsigned-issuer - namespace: system -spec: - selfSigned: {} diff --git a/operator/config/certmanager/kustomization.yaml b/operator/config/certmanager/kustomization.yaml deleted file mode 100644 index fcb7498e..00000000 --- a/operator/config/certmanager/kustomization.yaml +++ /dev/null @@ -1,7 +0,0 @@ -resources: -- issuer.yaml -- certificate-webhook.yaml -- certificate-metrics.yaml - -configurations: -- kustomizeconfig.yaml diff --git a/operator/config/certmanager/kustomizeconfig.yaml b/operator/config/certmanager/kustomizeconfig.yaml deleted file mode 100644 index cf6f89e8..00000000 --- a/operator/config/certmanager/kustomizeconfig.yaml +++ /dev/null @@ -1,8 +0,0 @@ -# This configuration is for teaching kustomize how to update name ref substitution -nameReference: -- kind: Issuer - group: cert-manager.io - fieldSpecs: - - kind: Certificate - group: cert-manager.io - path: spec/issuerRef/name diff --git a/operator/config/crd/bases/fs.functionstream.github.io_functions.yaml b/operator/config/crd/bases/fs.functionstream.github.io_functions.yaml deleted file mode 100644 index 9214f7d7..00000000 --- a/operator/config/crd/bases/fs.functionstream.github.io_functions.yaml +++ /dev/null @@ -1,150 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.17.2 - name: functions.fs.functionstream.github.io -spec: - group: fs.functionstream.github.io - names: - kind: Function - listKind: FunctionList - plural: functions - singular: function - scope: Namespaced - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: Function is the Schema for the functions API. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. 
- Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: FunctionSpec defines the desired state of Function - properties: - config: - additionalProperties: - x-kubernetes-preserve-unknown-fields: true - description: Configurations as key-value pairs - type: object - description: - description: Description of the function - type: string - displayName: - description: Display name of the function - type: string - module: - description: Module name - type: string - packageRef: - description: Package reference - properties: - name: - description: Name of the Package resource - type: string - namespace: - description: Namespace of the Package resource - type: string - required: - - name - type: object - replicas: - default: 1 - description: Number of replicas for the function deployment - format: int32 - type: integer - requestSource: - description: Request source - properties: - pulsar: - description: Pulsar source specification - properties: - topic: - description: Topic name - type: string - required: - - topic - type: object - type: object - sink: - description: Sink specifies the sink configuration - properties: - pulsar: - description: Pulsar sink specification - properties: - topic: - description: Topic name - type: string - required: - - topic - type: object - type: object - sources: - description: List of sources - items: - description: SourceSpec defines a source or sink specification - properties: - 
pulsar: - description: Pulsar source specification - properties: - topic: - description: Topic name - type: string - required: - - topic - type: object - type: object - type: array - subscriptionName: - type: string - required: - - module - - packageRef - type: object - status: - description: FunctionStatus defines the observed state of Function - properties: - availableReplicas: - description: Number of available pods (ready for at least minReadySeconds) - format: int32 - type: integer - observedGeneration: - description: Most recent generation observed for this Function - format: int64 - type: integer - readyReplicas: - description: Total number of ready pods - format: int32 - type: integer - replicas: - description: Total number of non-terminated pods targeted by this - deployment - format: int32 - type: integer - updatedReplicas: - description: Total number of updated pods - format: int32 - type: integer - type: object - type: object - served: true - storage: true - subresources: - status: {} diff --git a/operator/config/crd/bases/fs.functionstream.github.io_packages.yaml b/operator/config/crd/bases/fs.functionstream.github.io_packages.yaml deleted file mode 100644 index 5e2238fb..00000000 --- a/operator/config/crd/bases/fs.functionstream.github.io_packages.yaml +++ /dev/null @@ -1,125 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.17.2 - name: packages.fs.functionstream.github.io -spec: - group: fs.functionstream.github.io - names: - kind: Package - listKind: PackageList - plural: packages - shortNames: - - pkg - singular: package - scope: Namespaced - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: Package is the Schema for the packages API. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. 
- Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: PackageSpec defines the desired state of Package - properties: - description: - description: Description provides additional information about the - package - type: string - displayName: - description: DisplayName is the human-readable name of the package - type: string - functionType: - description: FunctionType contains function type configuration - properties: - cloud: - description: Cloud contains cloud function package configuration - properties: - image: - description: Image specifies the container image for cloud - deployment - type: string - required: - - image - type: object - type: object - logo: - description: Logo is the URL or base64 encoded image for the package - logo - type: string - modules: - additionalProperties: - description: Module defines a module within a package - properties: - config: - additionalProperties: - description: ConfigItem defines a configuration item for a - module - properties: - description: - description: Description provides additional information - about the config item - type: string - displayName: - description: DisplayName is the human-readable name of - the config item - type: string - required: - description: Required indicates whether this config item - is mandatory - type: boolean - type: - description: Type specifies the data type of the config - item - type: string - type: object - description: 
Config is a list of configuration items for the - module - type: object - description: - description: Description provides additional information about - the module - type: string - displayName: - description: DisplayName is the human-readable name of the module - type: string - sinkSchema: - description: SinkSchema defines the output schema for the module - type: string - sourceSchema: - description: SourceSchema defines the input schema for the module - type: string - type: object - description: Modules is a map of module names to their configurations - type: object - required: - - functionType - - modules - type: object - status: - description: PackageStatus defines the observed state of Package. - type: object - type: object - served: true - storage: true - subresources: - status: {} diff --git a/operator/config/crd/kustomization.yaml b/operator/config/crd/kustomization.yaml deleted file mode 100644 index a141ac7c..00000000 --- a/operator/config/crd/kustomization.yaml +++ /dev/null @@ -1,17 +0,0 @@ -# This kustomization.yaml is not intended to be run by itself, -# since it depends on service name and namespace that are out of this kustomize package. -# It should be run by config/default -resources: -- bases/fs.functionstream.github.io_functions.yaml -- bases/fs.functionstream.github.io_packages.yaml -# +kubebuilder:scaffold:crdkustomizeresource - -patches: -# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. -# patches here are for enabling the conversion webhook for each CRD -# +kubebuilder:scaffold:crdkustomizewebhookpatch - -# [WEBHOOK] To enable webhook, uncomment the following section -# the following config is for teaching kustomize how to do kustomization for CRDs. 
-#configurations: -#- kustomizeconfig.yaml diff --git a/operator/config/crd/kustomizeconfig.yaml b/operator/config/crd/kustomizeconfig.yaml deleted file mode 100644 index ec5c150a..00000000 --- a/operator/config/crd/kustomizeconfig.yaml +++ /dev/null @@ -1,19 +0,0 @@ -# This file is for teaching kustomize how to substitute name and namespace reference in CRD -nameReference: -- kind: Service - version: v1 - fieldSpecs: - - kind: CustomResourceDefinition - version: v1 - group: apiextensions.k8s.io - path: spec/conversion/webhook/clientConfig/service/name - -namespace: -- kind: CustomResourceDefinition - version: v1 - group: apiextensions.k8s.io - path: spec/conversion/webhook/clientConfig/service/namespace - create: false - -varReference: -- path: metadata/annotations diff --git a/operator/config/default/cert_metrics_manager_patch.yaml b/operator/config/default/cert_metrics_manager_patch.yaml deleted file mode 100644 index d9750155..00000000 --- a/operator/config/default/cert_metrics_manager_patch.yaml +++ /dev/null @@ -1,30 +0,0 @@ -# This patch adds the args, volumes, and ports to allow the manager to use the metrics-server certs. 
- -# Add the volumeMount for the metrics-server certs -- op: add - path: /spec/template/spec/containers/0/volumeMounts/- - value: - mountPath: /tmp/k8s-metrics-server/metrics-certs - name: metrics-certs - readOnly: true - -# Add the --metrics-cert-path argument for the metrics server -- op: add - path: /spec/template/spec/containers/0/args/- - value: --metrics-cert-path=/tmp/k8s-metrics-server/metrics-certs - -# Add the metrics-server certs volume configuration -- op: add - path: /spec/template/spec/volumes/- - value: - name: metrics-certs - secret: - secretName: metrics-server-cert - optional: false - items: - - key: ca.crt - path: ca.crt - - key: tls.crt - path: tls.crt - - key: tls.key - path: tls.key diff --git a/operator/config/default/kustomization.yaml b/operator/config/default/kustomization.yaml deleted file mode 100644 index cf22f0b5..00000000 --- a/operator/config/default/kustomization.yaml +++ /dev/null @@ -1,234 +0,0 @@ -# Adds namespace to all resources. -namespace: operator-system - -# Value of this field is prepended to the -# names of all resources, e.g. a deployment named -# "wordpress" becomes "alices-wordpress". -# Note that it should also match with the prefix (text before '-') of the namespace -# field above. -namePrefix: operator- - -# Labels to add to all resources and selectors. -#labels: -#- includeSelectors: true -# pairs: -# someName: someValue - -resources: -- ../crd -- ../rbac -- ../manager -# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in -# crd/kustomization.yaml -- ../webhook -# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. -- ../certmanager -# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. -#- ../prometheus -# [METRICS] Expose the controller manager metrics service. 
-- metrics_service.yaml -# [NETWORK POLICY] Protect the /metrics endpoint and Webhook Server with NetworkPolicy. -# Only Pod(s) running a namespace labeled with 'metrics: enabled' will be able to gather the metrics. -# Only CR(s) which requires webhooks and are applied on namespaces labeled with 'webhooks: enabled' will -# be able to communicate with the Webhook Server. -#- ../network-policy - -# Uncomment the patches line if you enable Metrics -patches: -# [METRICS] The following patch will enable the metrics endpoint using HTTPS and the port :8443. -# More info: https://book.kubebuilder.io/reference/metrics -- path: manager_metrics_patch.yaml - target: - kind: Deployment - -# Uncomment the patches line if you enable Metrics and CertManager -# [METRICS-WITH-CERTS] To enable metrics protected with certManager, uncomment the following line. -# This patch will protect the metrics with certManager self-signed certs. -#- path: cert_metrics_manager_patch.yaml -# target: -# kind: Deployment - -# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in -# crd/kustomization.yaml -- path: manager_webhook_patch.yaml - target: - kind: Deployment - -# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. -# Uncomment the following replacements to add the cert-manager CA injection annotations -replacements: - - source: # Uncomment the following block to enable certificates for metrics - kind: Service - version: v1 - name: controller-manager-metrics-service - fieldPath: metadata.name - targets: - - select: - kind: Certificate - group: cert-manager.io - version: v1 - name: metrics-certs - fieldPaths: - - spec.dnsNames.0 - - spec.dnsNames.1 - options: - delimiter: '.' 
- index: 0 - create: true - - select: # Uncomment the following to set the Service name for TLS config in Prometheus ServiceMonitor - kind: ServiceMonitor - group: monitoring.coreos.com - version: v1 - name: controller-manager-metrics-monitor - fieldPaths: - - spec.endpoints.0.tlsConfig.serverName - options: - delimiter: '.' - index: 0 - create: true - - - source: - kind: Service - version: v1 - name: controller-manager-metrics-service - fieldPath: metadata.namespace - targets: - - select: - kind: Certificate - group: cert-manager.io - version: v1 - name: metrics-certs - fieldPaths: - - spec.dnsNames.0 - - spec.dnsNames.1 - options: - delimiter: '.' - index: 1 - create: true - - select: # Uncomment the following to set the Service namespace for TLS in Prometheus ServiceMonitor - kind: ServiceMonitor - group: monitoring.coreos.com - version: v1 - name: controller-manager-metrics-monitor - fieldPaths: - - spec.endpoints.0.tlsConfig.serverName - options: - delimiter: '.' - index: 1 - create: true - - - source: # Uncomment the following block if you have any webhook - kind: Service - version: v1 - name: webhook-service - fieldPath: .metadata.name # Name of the service - targets: - - select: - kind: Certificate - group: cert-manager.io - version: v1 - name: serving-cert - fieldPaths: - - .spec.dnsNames.0 - - .spec.dnsNames.1 - options: - delimiter: '.' - index: 0 - create: true - - source: - kind: Service - version: v1 - name: webhook-service - fieldPath: .metadata.namespace # Namespace of the service - targets: - - select: - kind: Certificate - group: cert-manager.io - version: v1 - name: serving-cert - fieldPaths: - - .spec.dnsNames.0 - - .spec.dnsNames.1 - options: - delimiter: '.' 
- index: 1 - create: true - - - source: # Uncomment the following block if you have a ValidatingWebhook (--programmatic-validation) - kind: Certificate - group: cert-manager.io - version: v1 - name: serving-cert # This name should match the one in certificate.yaml - fieldPath: .metadata.namespace # Namespace of the certificate CR - targets: - - select: - kind: ValidatingWebhookConfiguration - fieldPaths: - - .metadata.annotations.[cert-manager.io/inject-ca-from] - options: - delimiter: '/' - index: 0 - create: true - - source: - kind: Certificate - group: cert-manager.io - version: v1 - name: serving-cert - fieldPath: .metadata.name - targets: - - select: - kind: ValidatingWebhookConfiguration - fieldPaths: - - .metadata.annotations.[cert-manager.io/inject-ca-from] - options: - delimiter: '/' - index: 1 - create: true - - - source: # Uncomment the following block if you have a DefaultingWebhook (--defaulting ) - kind: Certificate - group: cert-manager.io - version: v1 - name: serving-cert - fieldPath: .metadata.namespace # Namespace of the certificate CR - targets: - - select: - kind: MutatingWebhookConfiguration - fieldPaths: - - .metadata.annotations.[cert-manager.io/inject-ca-from] - options: - delimiter: '/' - index: 0 - create: true - - source: - kind: Certificate - group: cert-manager.io - version: v1 - name: serving-cert - fieldPath: .metadata.name - targets: - - select: - kind: MutatingWebhookConfiguration - fieldPaths: - - .metadata.annotations.[cert-manager.io/inject-ca-from] - options: - delimiter: '/' - index: 1 - create: true - -# - source: # Uncomment the following block if you have a ConversionWebhook (--conversion) -# kind: Certificate -# group: cert-manager.io -# version: v1 -# name: serving-cert -# fieldPath: .metadata.namespace # Namespace of the certificate CR -# targets: # Do not remove or uncomment the following scaffold marker; required to generate code for target CRD. 
-# +kubebuilder:scaffold:crdkustomizecainjectionns -# - source: -# kind: Certificate -# group: cert-manager.io -# version: v1 -# name: serving-cert -# fieldPath: .metadata.name -# targets: # Do not remove or uncomment the following scaffold marker; required to generate code for target CRD. -# +kubebuilder:scaffold:crdkustomizecainjectionname diff --git a/operator/config/default/manager_metrics_patch.yaml b/operator/config/default/manager_metrics_patch.yaml deleted file mode 100644 index 2aaef653..00000000 --- a/operator/config/default/manager_metrics_patch.yaml +++ /dev/null @@ -1,4 +0,0 @@ -# This patch adds the args to allow exposing the metrics endpoint using HTTPS -- op: add - path: /spec/template/spec/containers/0/args/0 - value: --metrics-bind-address=:8443 diff --git a/operator/config/default/manager_webhook_patch.yaml b/operator/config/default/manager_webhook_patch.yaml deleted file mode 100644 index 963c8a4c..00000000 --- a/operator/config/default/manager_webhook_patch.yaml +++ /dev/null @@ -1,31 +0,0 @@ -# This patch ensures the webhook certificates are properly mounted in the manager container. -# It configures the necessary arguments, volumes, volume mounts, and container ports. 
- -# Add the --webhook-cert-path argument for configuring the webhook certificate path -- op: add - path: /spec/template/spec/containers/0/args/- - value: --webhook-cert-path=/tmp/k8s-webhook-server/serving-certs - -# Add the volumeMount for the webhook certificates -- op: add - path: /spec/template/spec/containers/0/volumeMounts/- - value: - mountPath: /tmp/k8s-webhook-server/serving-certs - name: webhook-certs - readOnly: true - -# Add the port configuration for the webhook server -- op: add - path: /spec/template/spec/containers/0/ports/- - value: - containerPort: 9443 - name: webhook-server - protocol: TCP - -# Add the volume configuration for the webhook certificates -- op: add - path: /spec/template/spec/volumes/- - value: - name: webhook-certs - secret: - secretName: webhook-server-cert diff --git a/operator/config/default/metrics_service.yaml b/operator/config/default/metrics_service.yaml deleted file mode 100644 index 1f4155a7..00000000 --- a/operator/config/default/metrics_service.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - labels: - control-plane: controller-manager - app.kubernetes.io/name: operator - app.kubernetes.io/managed-by: kustomize - name: controller-manager-metrics-service - namespace: system -spec: - ports: - - name: https - port: 8443 - protocol: TCP - targetPort: 8443 - selector: - control-plane: controller-manager - app.kubernetes.io/name: operator diff --git a/operator/config/manager/kustomization.yaml b/operator/config/manager/kustomization.yaml deleted file mode 100644 index f107c4d3..00000000 --- a/operator/config/manager/kustomization.yaml +++ /dev/null @@ -1,8 +0,0 @@ -resources: -- manager.yaml -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -images: -- name: controller - newName: functionstream/operator - newTag: latest diff --git a/operator/config/manager/manager.yaml b/operator/config/manager/manager.yaml deleted file mode 100644 index 5c9e4c42..00000000 --- 
a/operator/config/manager/manager.yaml +++ /dev/null @@ -1,98 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - labels: - control-plane: controller-manager - app.kubernetes.io/name: operator - app.kubernetes.io/managed-by: kustomize - name: system ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: controller-manager - namespace: system - labels: - control-plane: controller-manager - app.kubernetes.io/name: operator - app.kubernetes.io/managed-by: kustomize -spec: - selector: - matchLabels: - control-plane: controller-manager - app.kubernetes.io/name: operator - replicas: 1 - template: - metadata: - annotations: - kubectl.kubernetes.io/default-container: manager - labels: - control-plane: controller-manager - app.kubernetes.io/name: operator - spec: - # TODO(user): Uncomment the following code to configure the nodeAffinity expression - # according to the platforms which are supported by your solution. - # It is considered best practice to support multiple architectures. You can - # build your manager image using the makefile target docker-buildx. - # affinity: - # nodeAffinity: - # requiredDuringSchedulingIgnoredDuringExecution: - # nodeSelectorTerms: - # - matchExpressions: - # - key: kubernetes.io/arch - # operator: In - # values: - # - amd64 - # - arm64 - # - ppc64le - # - s390x - # - key: kubernetes.io/os - # operator: In - # values: - # - linux - securityContext: - # Projects are configured by default to adhere to the "restricted" Pod Security Standards. - # This ensures that deployments meet the highest security requirements for Kubernetes. 
- # For more details, see: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted - runAsNonRoot: true - seccompProfile: - type: RuntimeDefault - containers: - - command: - - /manager - args: - - --leader-elect - - --health-probe-bind-address=:8081 - image: functionstream/operator:latest - name: manager - ports: [] - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - "ALL" - livenessProbe: - httpGet: - path: /healthz - port: 8081 - initialDelaySeconds: 15 - periodSeconds: 20 - readinessProbe: - httpGet: - path: /readyz - port: 8081 - initialDelaySeconds: 5 - periodSeconds: 10 - # TODO(user): Configure the resources accordingly based on the project requirements. - # More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - resources: - limits: - cpu: 500m - memory: 128Mi - requests: - cpu: 10m - memory: 64Mi - volumeMounts: [] - volumes: [] - serviceAccountName: controller-manager - terminationGracePeriodSeconds: 10 diff --git a/operator/config/network-policy/allow-metrics-traffic.yaml b/operator/config/network-policy/allow-metrics-traffic.yaml deleted file mode 100644 index d3ac9836..00000000 --- a/operator/config/network-policy/allow-metrics-traffic.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# This NetworkPolicy allows ingress traffic -# with Pods running on namespaces labeled with 'metrics: enabled'. Only Pods on those -# namespaces are able to gather data from the metrics endpoint. 
-apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - labels: - app.kubernetes.io/name: operator - app.kubernetes.io/managed-by: kustomize - name: allow-metrics-traffic - namespace: system -spec: - podSelector: - matchLabels: - control-plane: controller-manager - app.kubernetes.io/name: operator - policyTypes: - - Ingress - ingress: - # This allows ingress traffic from any namespace with the label metrics: enabled - - from: - - namespaceSelector: - matchLabels: - metrics: enabled # Only from namespaces with this label - ports: - - port: 8443 - protocol: TCP diff --git a/operator/config/network-policy/allow-webhook-traffic.yaml b/operator/config/network-policy/allow-webhook-traffic.yaml deleted file mode 100644 index 08fbd901..00000000 --- a/operator/config/network-policy/allow-webhook-traffic.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# This NetworkPolicy allows ingress traffic to your webhook server running -# as part of the controller-manager from specific namespaces and pods. 
CR(s) which uses webhooks -# will only work when applied in namespaces labeled with 'webhook: enabled' -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - labels: - app.kubernetes.io/name: operator - app.kubernetes.io/managed-by: kustomize - name: allow-webhook-traffic - namespace: system -spec: - podSelector: - matchLabels: - control-plane: controller-manager - app.kubernetes.io/name: operator - policyTypes: - - Ingress - ingress: - # This allows ingress traffic from any namespace with the label webhook: enabled - - from: - - namespaceSelector: - matchLabels: - webhook: enabled # Only from namespaces with this label - ports: - - port: 443 - protocol: TCP diff --git a/operator/config/network-policy/kustomization.yaml b/operator/config/network-policy/kustomization.yaml deleted file mode 100644 index 0872bee1..00000000 --- a/operator/config/network-policy/kustomization.yaml +++ /dev/null @@ -1,3 +0,0 @@ -resources: -- allow-webhook-traffic.yaml -- allow-metrics-traffic.yaml diff --git a/operator/config/prometheus/kustomization.yaml b/operator/config/prometheus/kustomization.yaml deleted file mode 100644 index fdc5481b..00000000 --- a/operator/config/prometheus/kustomization.yaml +++ /dev/null @@ -1,11 +0,0 @@ -resources: -- monitor.yaml - -# [PROMETHEUS-WITH-CERTS] The following patch configures the ServiceMonitor in ../prometheus -# to securely reference certificates created and managed by cert-manager. -# Additionally, ensure that you uncomment the [METRICS WITH CERTMANAGER] patch under config/default/kustomization.yaml -# to mount the "metrics-server-cert" secret in the Manager Deployment. 
-#patches: -# - path: monitor_tls_patch.yaml -# target: -# kind: ServiceMonitor diff --git a/operator/config/prometheus/monitor.yaml b/operator/config/prometheus/monitor.yaml deleted file mode 100644 index b73583e3..00000000 --- a/operator/config/prometheus/monitor.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# Prometheus Monitor Service (Metrics) -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - labels: - control-plane: controller-manager - app.kubernetes.io/name: operator - app.kubernetes.io/managed-by: kustomize - name: controller-manager-metrics-monitor - namespace: system -spec: - endpoints: - - path: /metrics - port: https # Ensure this is the name of the port that exposes HTTPS metrics - scheme: https - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token - tlsConfig: - # TODO(user): The option insecureSkipVerify: true is not recommended for production since it disables - # certificate verification, exposing the system to potential man-in-the-middle attacks. - # For production environments, it is recommended to use cert-manager for automatic TLS certificate management. - # To apply this configuration, enable cert-manager and use the patch located at config/prometheus/servicemonitor_tls_patch.yaml, - # which securely references the certificate from the 'metrics-server-cert' secret. 
- insecureSkipVerify: true - selector: - matchLabels: - control-plane: controller-manager - app.kubernetes.io/name: operator diff --git a/operator/config/prometheus/monitor_tls_patch.yaml b/operator/config/prometheus/monitor_tls_patch.yaml deleted file mode 100644 index 5bf84ce0..00000000 --- a/operator/config/prometheus/monitor_tls_patch.yaml +++ /dev/null @@ -1,19 +0,0 @@ -# Patch for Prometheus ServiceMonitor to enable secure TLS configuration -# using certificates managed by cert-manager -- op: replace - path: /spec/endpoints/0/tlsConfig - value: - # SERVICE_NAME and SERVICE_NAMESPACE will be substituted by kustomize - serverName: SERVICE_NAME.SERVICE_NAMESPACE.svc - insecureSkipVerify: false - ca: - secret: - name: metrics-server-cert - key: ca.crt - cert: - secret: - name: metrics-server-cert - key: tls.crt - keySecret: - name: metrics-server-cert - key: tls.key diff --git a/operator/config/rbac/function_admin_role.yaml b/operator/config/rbac/function_admin_role.yaml deleted file mode 100644 index 6dd5cc7d..00000000 --- a/operator/config/rbac/function_admin_role.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# This rule is not used by the project operator itself. -# It is provided to allow the cluster admin to help manage permissions for users. -# -# Grants full permissions ('*') over fs.functionstream.github.io. -# This role is intended for users authorized to modify roles and bindings within the cluster, -# enabling them to delegate specific permissions to other users or groups as needed. 
- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/name: operator - app.kubernetes.io/managed-by: kustomize - name: function-admin-role -rules: -- apiGroups: - - fs.functionstream.github.io - resources: - - functions - verbs: - - '*' -- apiGroups: - - fs.functionstream.github.io - resources: - - functions/status - verbs: - - get diff --git a/operator/config/rbac/function_editor_role.yaml b/operator/config/rbac/function_editor_role.yaml deleted file mode 100644 index 20cf07a7..00000000 --- a/operator/config/rbac/function_editor_role.yaml +++ /dev/null @@ -1,33 +0,0 @@ -# This rule is not used by the project operator itself. -# It is provided to allow the cluster admin to help manage permissions for users. -# -# Grants permissions to create, update, and delete resources within the fs.functionstream.github.io. -# This role is intended for users who need to manage these resources -# but should not control RBAC or manage permissions for others. - -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/name: operator - app.kubernetes.io/managed-by: kustomize - name: function-editor-role -rules: -- apiGroups: - - fs.functionstream.github.io - resources: - - functions - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - fs.functionstream.github.io - resources: - - functions/status - verbs: - - get diff --git a/operator/config/rbac/function_viewer_role.yaml b/operator/config/rbac/function_viewer_role.yaml deleted file mode 100644 index 877da612..00000000 --- a/operator/config/rbac/function_viewer_role.yaml +++ /dev/null @@ -1,29 +0,0 @@ -# This rule is not used by the project operator itself. -# It is provided to allow the cluster admin to help manage permissions for users. -# -# Grants read-only access to fs.functionstream.github.io resources. 
-# This role is intended for users who need visibility into these resources -# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing. - -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/name: operator - app.kubernetes.io/managed-by: kustomize - name: function-viewer-role -rules: -- apiGroups: - - fs.functionstream.github.io - resources: - - functions - verbs: - - get - - list - - watch -- apiGroups: - - fs.functionstream.github.io - resources: - - functions/status - verbs: - - get diff --git a/operator/config/rbac/kustomization.yaml b/operator/config/rbac/kustomization.yaml deleted file mode 100644 index 4d84bad8..00000000 --- a/operator/config/rbac/kustomization.yaml +++ /dev/null @@ -1,31 +0,0 @@ -resources: -# All RBAC will be applied under this service account in -# the deployment namespace. You may comment out this resource -# if your manager will use a service account that exists at -# runtime. Be sure to update RoleBinding and ClusterRoleBinding -# subjects if changing service account names. -- service_account.yaml -- role.yaml -- role_binding.yaml -- leader_election_role.yaml -- leader_election_role_binding.yaml -# The following RBAC configurations are used to protect -# the metrics endpoint with authn/authz. These configurations -# ensure that only authorized users and service accounts -# can access the metrics endpoint. Comment the following -# permissions if you want to disable this protection. -# More info: https://book.kubebuilder.io/reference/metrics.html -- metrics_auth_role.yaml -- metrics_auth_role_binding.yaml -- metrics_reader_role.yaml -# For each CRD, "Admin", "Editor" and "Viewer" roles are scaffolded by -# default, aiding admins in cluster management. Those roles are -# not used by the {{ .ProjectName }} itself. You can comment the following lines -# if you do not want those helpers be installed with your Project. 
-- function_admin_role.yaml -- function_editor_role.yaml -- function_viewer_role.yaml -- packages_admin_role.yaml -- packages_editor_role.yaml -- packages_viewer_role.yaml - diff --git a/operator/config/rbac/leader_election_role.yaml b/operator/config/rbac/leader_election_role.yaml deleted file mode 100644 index 507e52b1..00000000 --- a/operator/config/rbac/leader_election_role.yaml +++ /dev/null @@ -1,40 +0,0 @@ -# permissions to do leader election. -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - labels: - app.kubernetes.io/name: operator - app.kubernetes.io/managed-by: kustomize - name: leader-election-role -rules: -- apiGroups: - - "" - resources: - - configmaps - verbs: - - get - - list - - watch - - create - - update - - patch - - delete -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - get - - list - - watch - - create - - update - - patch - - delete -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch diff --git a/operator/config/rbac/leader_election_role_binding.yaml b/operator/config/rbac/leader_election_role_binding.yaml deleted file mode 100644 index c60ecc72..00000000 --- a/operator/config/rbac/leader_election_role_binding.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - labels: - app.kubernetes.io/name: operator - app.kubernetes.io/managed-by: kustomize - name: leader-election-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: leader-election-role -subjects: -- kind: ServiceAccount - name: controller-manager - namespace: system diff --git a/operator/config/rbac/metrics_auth_role.yaml b/operator/config/rbac/metrics_auth_role.yaml deleted file mode 100644 index 32d2e4ec..00000000 --- a/operator/config/rbac/metrics_auth_role.yaml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: metrics-auth-role -rules: -- apiGroups: - - 
authentication.k8s.io - resources: - - tokenreviews - verbs: - - create -- apiGroups: - - authorization.k8s.io - resources: - - subjectaccessreviews - verbs: - - create diff --git a/operator/config/rbac/metrics_auth_role_binding.yaml b/operator/config/rbac/metrics_auth_role_binding.yaml deleted file mode 100644 index e775d67f..00000000 --- a/operator/config/rbac/metrics_auth_role_binding.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: metrics-auth-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: metrics-auth-role -subjects: -- kind: ServiceAccount - name: controller-manager - namespace: system diff --git a/operator/config/rbac/metrics_reader_role.yaml b/operator/config/rbac/metrics_reader_role.yaml deleted file mode 100644 index 51a75db4..00000000 --- a/operator/config/rbac/metrics_reader_role.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: metrics-reader -rules: -- nonResourceURLs: - - "/metrics" - verbs: - - get diff --git a/operator/config/rbac/packages_admin_role.yaml b/operator/config/rbac/packages_admin_role.yaml deleted file mode 100644 index 55d2e6d8..00000000 --- a/operator/config/rbac/packages_admin_role.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# This rule is not used by the project operator itself. -# It is provided to allow the cluster admin to help manage permissions for users. -# -# Grants full permissions ('*') over fs.functionstream.github.io. -# This role is intended for users authorized to modify roles and bindings within the cluster, -# enabling them to delegate specific permissions to other users or groups as needed. 
- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/name: operator - app.kubernetes.io/managed-by: kustomize - name: packages-admin-role -rules: -- apiGroups: - - fs.functionstream.github.io - resources: - - packages - verbs: - - '*' -- apiGroups: - - fs.functionstream.github.io - resources: - - packages/status - verbs: - - get diff --git a/operator/config/rbac/packages_editor_role.yaml b/operator/config/rbac/packages_editor_role.yaml deleted file mode 100644 index af55448f..00000000 --- a/operator/config/rbac/packages_editor_role.yaml +++ /dev/null @@ -1,33 +0,0 @@ -# This rule is not used by the project operator itself. -# It is provided to allow the cluster admin to help manage permissions for users. -# -# Grants permissions to create, update, and delete resources within the fs.functionstream.github.io. -# This role is intended for users who need to manage these resources -# but should not control RBAC or manage permissions for others. - -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/name: operator - app.kubernetes.io/managed-by: kustomize - name: packages-editor-role -rules: -- apiGroups: - - fs.functionstream.github.io - resources: - - packages - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - fs.functionstream.github.io - resources: - - packages/status - verbs: - - get diff --git a/operator/config/rbac/packages_viewer_role.yaml b/operator/config/rbac/packages_viewer_role.yaml deleted file mode 100644 index dae9caa4..00000000 --- a/operator/config/rbac/packages_viewer_role.yaml +++ /dev/null @@ -1,29 +0,0 @@ -# This rule is not used by the project operator itself. -# It is provided to allow the cluster admin to help manage permissions for users. -# -# Grants read-only access to fs.functionstream.github.io resources. 
-# This role is intended for users who need visibility into these resources -# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing. - -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/name: operator - app.kubernetes.io/managed-by: kustomize - name: packages-viewer-role -rules: -- apiGroups: - - fs.functionstream.github.io - resources: - - packages - verbs: - - get - - list - - watch -- apiGroups: - - fs.functionstream.github.io - resources: - - packages/status - verbs: - - get diff --git a/operator/config/rbac/role.yaml b/operator/config/rbac/role.yaml deleted file mode 100644 index 79cfd5c4..00000000 --- a/operator/config/rbac/role.yaml +++ /dev/null @@ -1,55 +0,0 @@ ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: manager-role -rules: -- apiGroups: - - apps - resources: - - deployments - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - fs.functionstream.github.io - resources: - - functions - - package - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - fs.functionstream.github.io - resources: - - functions/finalizers - - package/finalizers - verbs: - - update -- apiGroups: - - fs.functionstream.github.io - resources: - - functions/status - - package/status - verbs: - - get - - patch - - update -- apiGroups: - - fs.functionstream.github.io - resources: - - packages - verbs: - - get - - list - - watch diff --git a/operator/config/rbac/role_binding.yaml b/operator/config/rbac/role_binding.yaml deleted file mode 100644 index 5d279606..00000000 --- a/operator/config/rbac/role_binding.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - labels: - app.kubernetes.io/name: operator - app.kubernetes.io/managed-by: kustomize - name: manager-rolebinding -roleRef: - apiGroup: 
rbac.authorization.k8s.io - kind: ClusterRole - name: manager-role -subjects: -- kind: ServiceAccount - name: controller-manager - namespace: system diff --git a/operator/config/rbac/service_account.yaml b/operator/config/rbac/service_account.yaml deleted file mode 100644 index 3567d2fe..00000000 --- a/operator/config/rbac/service_account.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - app.kubernetes.io/name: operator - app.kubernetes.io/managed-by: kustomize - name: controller-manager - namespace: system diff --git a/operator/config/samples/fs_v1alpha1_function.yaml b/operator/config/samples/fs_v1alpha1_function.yaml deleted file mode 100644 index 62ca5471..00000000 --- a/operator/config/samples/fs_v1alpha1_function.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: fs.functionstream.github.io/v1alpha1 -kind: Function -metadata: - labels: - app.kubernetes.io/name: operator - app.kubernetes.io/managed-by: kustomize - name: function-sample -spec: - displayName: "Sample Function" - description: "A sample function for demonstration purposes." 
- packageRef: - name: "sample-package" - # namespace: "default" # Optional: defaults to the same namespace as the Function - module: "sample-module" - # TODO(user): Add fields here diff --git a/operator/config/samples/fs_v1alpha1_packages.yaml b/operator/config/samples/fs_v1alpha1_packages.yaml deleted file mode 100644 index b0b3dffa..00000000 --- a/operator/config/samples/fs_v1alpha1_packages.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: fs.functionstream.github.io/v1alpha1 -kind: Packages -metadata: - labels: - app.kubernetes.io/name: operator - app.kubernetes.io/managed-by: kustomize - name: packages-sample -spec: - # TODO(user): Add fields here diff --git a/operator/config/samples/kustomization.yaml b/operator/config/samples/kustomization.yaml deleted file mode 100644 index 0dc1f452..00000000 --- a/operator/config/samples/kustomization.yaml +++ /dev/null @@ -1,5 +0,0 @@ -## Append samples of your project ## -resources: -- fs_v1alpha1_packages.yaml -- fs_v1alpha1_function.yaml -# +kubebuilder:scaffold:manifestskustomizesamples diff --git a/operator/config/webhook/kustomization.yaml b/operator/config/webhook/kustomization.yaml deleted file mode 100644 index 9cf26134..00000000 --- a/operator/config/webhook/kustomization.yaml +++ /dev/null @@ -1,6 +0,0 @@ -resources: -- manifests.yaml -- service.yaml - -configurations: -- kustomizeconfig.yaml diff --git a/operator/config/webhook/kustomizeconfig.yaml b/operator/config/webhook/kustomizeconfig.yaml deleted file mode 100644 index 206316e5..00000000 --- a/operator/config/webhook/kustomizeconfig.yaml +++ /dev/null @@ -1,22 +0,0 @@ -# the following config is for teaching kustomize where to look at when substituting nameReference. -# It requires kustomize v2.1.0 or newer to work properly. 
-nameReference: -- kind: Service - version: v1 - fieldSpecs: - - kind: MutatingWebhookConfiguration - group: admissionregistration.k8s.io - path: webhooks/clientConfig/service/name - - kind: ValidatingWebhookConfiguration - group: admissionregistration.k8s.io - path: webhooks/clientConfig/service/name - -namespace: -- kind: MutatingWebhookConfiguration - group: admissionregistration.k8s.io - path: webhooks/clientConfig/service/namespace - create: true -- kind: ValidatingWebhookConfiguration - group: admissionregistration.k8s.io - path: webhooks/clientConfig/service/namespace - create: true diff --git a/operator/config/webhook/manifests.yaml b/operator/config/webhook/manifests.yaml deleted file mode 100644 index 12c9404a..00000000 --- a/operator/config/webhook/manifests.yaml +++ /dev/null @@ -1,94 +0,0 @@ ---- -apiVersion: admissionregistration.k8s.io/v1 -kind: MutatingWebhookConfiguration -metadata: - name: mutating-webhook-configuration -webhooks: -- admissionReviewVersions: - - v1 - clientConfig: - service: - name: webhook-service - namespace: system - path: /mutate-fs-functionstream-github-io-v1alpha1-function - failurePolicy: Fail - name: mfunction-v1alpha1.kb.io - rules: - - apiGroups: - - fs.functionstream.github.io - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - resources: - - functions - sideEffects: None -- admissionReviewVersions: - - v1 - clientConfig: - service: - name: webhook-service - namespace: system - path: /mutate-fs-functionstream-github-io-v1alpha1-package - failurePolicy: Fail - name: mpackage-v1alpha1.kb.io - rules: - - apiGroups: - - fs.functionstream.github.io - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - resources: - - packages - sideEffects: None ---- -apiVersion: admissionregistration.k8s.io/v1 -kind: ValidatingWebhookConfiguration -metadata: - name: validating-webhook-configuration -webhooks: -- admissionReviewVersions: - - v1 - clientConfig: - service: - name: webhook-service - namespace: system - 
path: /validate-fs-functionstream-github-io-v1alpha1-function - failurePolicy: Fail - name: vfunction-v1alpha1.kb.io - rules: - - apiGroups: - - fs.functionstream.github.io - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - - DELETE - resources: - - functions - sideEffects: None -- admissionReviewVersions: - - v1 - clientConfig: - service: - name: webhook-service - namespace: system - path: /validate-fs-functionstream-github-io-v1alpha1-package - failurePolicy: Fail - name: vpackage-v1alpha1.kb.io - rules: - - apiGroups: - - fs.functionstream.github.io - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - - DELETE - resources: - - packages - sideEffects: None diff --git a/operator/config/webhook/service.yaml b/operator/config/webhook/service.yaml deleted file mode 100644 index a10cc235..00000000 --- a/operator/config/webhook/service.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - labels: - app.kubernetes.io/name: operator - app.kubernetes.io/managed-by: kustomize - name: webhook-service - namespace: system -spec: - ports: - - port: 443 - protocol: TCP - targetPort: 9443 - selector: - control-plane: controller-manager - app.kubernetes.io/name: operator diff --git a/operator/deploy/chart/.helmignore b/operator/deploy/chart/.helmignore deleted file mode 100644 index 7d92f7fb..00000000 --- a/operator/deploy/chart/.helmignore +++ /dev/null @@ -1,25 +0,0 @@ -# Patterns to ignore when building Helm packages. 
-# Operating system files -.DS_Store - -# Version control directories -.git/ -.gitignore -.bzr/ -.hg/ -.hgignore -.svn/ - -# Backup and temporary files -*.swp -*.tmp -*.bak -*.orig -*~ - -# IDE and editor-related files -.idea/ -.vscode/ - -# Helm chart artifacts -dist/chart/*.tgz diff --git a/operator/deploy/chart/Chart.yaml b/operator/deploy/chart/Chart.yaml deleted file mode 100644 index 2eac6b8b..00000000 --- a/operator/deploy/chart/Chart.yaml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: v2 -name: operator -description: A Helm chart to deploy the FunctionStream operator on Kubernetes. -type: application -version: 0.1.0 -appVersion: "0.1.0" -home: "https://github.com/FunctionStream/function-stream" -sources: - - "https://github.com/FunctionStream/function-stream/operator" -maintainers: - - name: Zike Yang - email: zike@apache.org -keywords: - - serverless - - streaming - - functionstream - - operators -annotations: - category: "Operators" diff --git a/operator/deploy/chart/templates/_helpers.tpl b/operator/deploy/chart/templates/_helpers.tpl deleted file mode 100644 index 668abe33..00000000 --- a/operator/deploy/chart/templates/_helpers.tpl +++ /dev/null @@ -1,50 +0,0 @@ -{{- define "chart.name" -}} -{{- if .Chart }} - {{- if .Chart.Name }} - {{- .Chart.Name | trunc 63 | trimSuffix "-" }} - {{- else if .Values.nameOverride }} - {{ .Values.nameOverride | trunc 63 | trimSuffix "-" }} - {{- else }} - operator - {{- end }} -{{- else }} - operator -{{- end }} -{{- end }} - - -{{- define "chart.labels" -}} -{{- if .Chart.AppVersion -}} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -{{- if .Chart.Version }} -helm.sh/chart: {{ .Chart.Version | quote }} -{{- end }} -app.kubernetes.io/name: {{ include "chart.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end }} - - -{{- define "chart.selectorLabels" -}} -app.kubernetes.io/name: {{ include "chart.name" . 
}} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end }} - - -{{- define "chart.hasMutatingWebhooks" -}} -{{- $hasMutating := false }} -{{- range . }} - {{- if eq .type "mutating" }} - $hasMutating = true }}{{- end }} -{{- end }} -{{ $hasMutating }}}}{{- end }} - - -{{- define "chart.hasValidatingWebhooks" -}} -{{- $hasValidating := false }} -{{- range . }} - {{- if eq .type "validating" }} - $hasValidating = true }}{{- end }} -{{- end }} -{{ $hasValidating }}}}{{- end }} diff --git a/operator/deploy/chart/templates/certmanager/certificate.yaml b/operator/deploy/chart/templates/certmanager/certificate.yaml deleted file mode 100644 index 2dfb9e9b..00000000 --- a/operator/deploy/chart/templates/certmanager/certificate.yaml +++ /dev/null @@ -1,60 +0,0 @@ -{{- if .Values.certmanager.enable }} -# Self-signed Issuer -apiVersion: cert-manager.io/v1 -kind: Issuer -metadata: - labels: - {{- include "chart.labels" . | nindent 4 }} - name: selfsigned-issuer - namespace: {{ .Release.Namespace }} -spec: - selfSigned: {} -{{- if .Values.webhook.enable }} ---- -# Certificate for the webhook -apiVersion: cert-manager.io/v1 -kind: Certificate -metadata: - annotations: - {{- if .Values.crd.keep }} - "helm.sh/resource-policy": keep - {{- end }} - name: serving-cert - namespace: {{ .Release.Namespace }} - labels: - {{- include "chart.labels" . | nindent 4 }} -spec: - dnsNames: - - operator.{{ .Release.Namespace }}.svc - - operator.{{ .Release.Namespace }}.svc.cluster.local - - operator-webhook-service.{{ .Release.Namespace }}.svc - issuerRef: - kind: Issuer - name: selfsigned-issuer - secretName: webhook-server-cert -{{- end }} -{{- if .Values.metrics.enable }} ---- -# Certificate for the metrics -apiVersion: cert-manager.io/v1 -kind: Certificate -metadata: - annotations: - {{- if .Values.crd.keep }} - "helm.sh/resource-policy": keep - {{- end }} - labels: - {{- include "chart.labels" . 
| nindent 4 }} - name: metrics-certs - namespace: {{ .Release.Namespace }} -spec: - dnsNames: - - operator.{{ .Release.Namespace }}.svc - - operator.{{ .Release.Namespace }}.svc.cluster.local - - operator-metrics-service.{{ .Release.Namespace }}.svc - issuerRef: - kind: Issuer - name: selfsigned-issuer - secretName: metrics-server-cert -{{- end }} -{{- end }} diff --git a/operator/deploy/chart/templates/crd/fs.functionstream.github.io_functions.yaml b/operator/deploy/chart/templates/crd/fs.functionstream.github.io_functions.yaml deleted file mode 100755 index d5795e77..00000000 --- a/operator/deploy/chart/templates/crd/fs.functionstream.github.io_functions.yaml +++ /dev/null @@ -1,157 +0,0 @@ -{{- if .Values.crd.enable }} ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - labels: - {{- include "chart.labels" . | nindent 4 }} - annotations: - {{- if .Values.crd.keep }} - "helm.sh/resource-policy": keep - {{- end }} - controller-gen.kubebuilder.io/version: v0.17.2 - name: functions.fs.functionstream.github.io -spec: - group: fs.functionstream.github.io - names: - kind: Function - listKind: FunctionList - plural: functions - singular: function - scope: Namespaced - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: Function is the Schema for the functions API. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. 
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: FunctionSpec defines the desired state of Function - properties: - config: - additionalProperties: - x-kubernetes-preserve-unknown-fields: true - description: Configurations as key-value pairs - type: object - description: - description: Description of the function - type: string - displayName: - description: Display name of the function - type: string - module: - description: Module name - type: string - packageRef: - description: Package reference - properties: - name: - description: Name of the Package resource - type: string - namespace: - description: Namespace of the Package resource - type: string - required: - - name - type: object - replicas: - default: 1 - description: Number of replicas for the function deployment - format: int32 - type: integer - requestSource: - description: Request source - properties: - pulsar: - description: Pulsar source specification - properties: - topic: - description: Topic name - type: string - required: - - topic - type: object - type: object - sink: - description: Sink specifies the sink configuration - properties: - pulsar: - description: Pulsar sink specification - properties: - topic: - description: Topic name - type: string - required: - - topic - type: object - type: object - sources: - description: List of sources - items: - description: SourceSpec defines a source or sink specification - properties: - pulsar: - description: Pulsar source specification - properties: - topic: - description: Topic name - type: string - required: - - topic - type: object - type: object - type: array - subscriptionName: - type: string - required: - - module - - packageRef - type: object - status: - description: FunctionStatus defines the observed state of Function - properties: - availableReplicas: - description: Number of available pods (ready for at least 
minReadySeconds) - format: int32 - type: integer - observedGeneration: - description: Most recent generation observed for this Function - format: int64 - type: integer - readyReplicas: - description: Total number of ready pods - format: int32 - type: integer - replicas: - description: Total number of non-terminated pods targeted by this - deployment - format: int32 - type: integer - updatedReplicas: - description: Total number of updated pods - format: int32 - type: integer - type: object - type: object - served: true - storage: true - subresources: - status: {} -{{- end -}} diff --git a/operator/deploy/chart/templates/crd/fs.functionstream.github.io_packages.yaml b/operator/deploy/chart/templates/crd/fs.functionstream.github.io_packages.yaml deleted file mode 100755 index 00f60dae..00000000 --- a/operator/deploy/chart/templates/crd/fs.functionstream.github.io_packages.yaml +++ /dev/null @@ -1,132 +0,0 @@ -{{- if .Values.crd.enable }} ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - labels: - {{- include "chart.labels" . | nindent 4 }} - annotations: - {{- if .Values.crd.keep }} - "helm.sh/resource-policy": keep - {{- end }} - controller-gen.kubebuilder.io/version: v0.17.2 - name: packages.fs.functionstream.github.io -spec: - group: fs.functionstream.github.io - names: - kind: Package - listKind: PackageList - plural: packages - shortNames: - - pkg - singular: package - scope: Namespaced - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: Package is the Schema for the packages API. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. 
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: PackageSpec defines the desired state of Package - properties: - description: - description: Description provides additional information about the - package - type: string - displayName: - description: DisplayName is the human-readable name of the package - type: string - functionType: - description: FunctionType contains function type configuration - properties: - cloud: - description: Cloud contains cloud function package configuration - properties: - image: - description: Image specifies the container image for cloud - deployment - type: string - required: - - image - type: object - type: object - logo: - description: Logo is the URL or base64 encoded image for the package - logo - type: string - modules: - additionalProperties: - description: Module defines a module within a package - properties: - config: - additionalProperties: - description: ConfigItem defines a configuration item for a - module - properties: - description: - description: Description provides additional information - about the config item - type: string - displayName: - description: DisplayName is the human-readable name of - the config item - type: string - required: - description: Required indicates whether this config item - is mandatory - type: boolean - type: - description: Type specifies the data type of the config - item - type: string - type: object - description: Config is a list of configuration items for the - module - type: object - description: - description: Description 
provides additional information about - the module - type: string - displayName: - description: DisplayName is the human-readable name of the module - type: string - sinkSchema: - description: SinkSchema defines the output schema for the module - type: string - sourceSchema: - description: SourceSchema defines the input schema for the module - type: string - type: object - description: Modules is a map of module names to their configurations - type: object - required: - - functionType - - modules - type: object - status: - description: PackageStatus defines the observed state of Package. - type: object - type: object - served: true - storage: true - subresources: - status: {} -{{- end -}} diff --git a/operator/deploy/chart/templates/manager/manager.yaml b/operator/deploy/chart/templates/manager/manager.yaml deleted file mode 100644 index 8cdb2475..00000000 --- a/operator/deploy/chart/templates/manager/manager.yaml +++ /dev/null @@ -1,103 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: function-stream - namespace: {{ .Release.Namespace }} - labels: - {{- include "chart.labels" . | nindent 4 }} - control-plane: controller-manager -spec: - replicas: {{ .Values.controllerManager.replicas }} - selector: - matchLabels: - {{- include "chart.selectorLabels" . | nindent 6 }} - control-plane: controller-manager - template: - metadata: - annotations: - kubectl.kubernetes.io/default-container: manager - labels: - {{- include "chart.labels" . | nindent 8 }} - control-plane: controller-manager - {{- if and .Values.controllerManager.pod .Values.controllerManager.pod.labels }} - {{- range $key, $value := .Values.controllerManager.pod.labels }} - {{ $key }}: {{ $value }} - {{- end }} - {{- end }} - spec: - containers: - - name: manager - args: - {{- range .Values.controllerManager.container.args }} - - {{ . 
}} - {{- end }} - command: - - /manager - image: {{ .Values.controllerManager.container.image.repository }}:{{ .Values.controllerManager.container.image.tag }} - imagePullPolicy: {{ .Values.controllerManager.container.imagePullPolicy }} - env: - {{- if .Values.pulsar.standalone.enable }} - - name: PULSAR_SERVICE_URL - value: pulsar://{{ .Release.Name }}-pulsar-standalone.{{ .Release.Namespace }}.svc.cluster.local:6650 - {{- else if .Values.pulsar.serviceUrl }} - - name: PULSAR_SERVICE_URL - value: {{ .Values.pulsar.serviceUrl }} - {{- end }} - {{- if .Values.pulsar.authPlugin }} - - name: PULSAR_AUTH_PLUGIN - value: {{ .Values.pulsar.authPlugin }} - {{- end }} - {{- if .Values.pulsar.authParams }} - - name: PULSAR_AUTH_PARAMS - value: {{ .Values.pulsar.authParams }} - {{- end }} - {{- if .Values.controllerManager.container.env }} - {{- range $key, $value := .Values.controllerManager.container.env }} - - name: {{ $key }} - value: {{ $value }} - {{- end }} - {{- end }} - livenessProbe: - {{- toYaml .Values.controllerManager.container.livenessProbe | nindent 12 }} - readinessProbe: - {{- toYaml .Values.controllerManager.container.readinessProbe | nindent 12 }} - {{- if .Values.webhook.enable }} - ports: - - containerPort: 9443 - name: webhook-server - protocol: TCP - {{- end }} - resources: - {{- toYaml .Values.controllerManager.container.resources | nindent 12 }} - securityContext: - {{- toYaml .Values.controllerManager.container.securityContext | nindent 12 }} - {{- if and .Values.certmanager.enable (or .Values.webhook.enable .Values.metrics.enable) }} - volumeMounts: - {{- if and .Values.webhook.enable .Values.certmanager.enable }} - - name: webhook-cert - mountPath: /tmp/k8s-webhook-server/serving-certs - readOnly: true - {{- end }} - {{- if and .Values.metrics.enable .Values.certmanager.enable }} - - name: metrics-certs - mountPath: /tmp/k8s-metrics-server/metrics-certs - readOnly: true - {{- end }} - {{- end }} - securityContext: - {{- toYaml 
.Values.controllerManager.securityContext | nindent 8 }} - serviceAccountName: {{ .Values.controllerManager.serviceAccountName }} - terminationGracePeriodSeconds: {{ .Values.controllerManager.terminationGracePeriodSeconds }} - {{- if and .Values.certmanager.enable (or .Values.webhook.enable .Values.metrics.enable) }} - volumes: - {{- if and .Values.webhook.enable .Values.certmanager.enable }} - - name: webhook-cert - secret: - secretName: webhook-server-cert - {{- end }} - {{- if and .Values.metrics.enable .Values.certmanager.enable }} - - name: metrics-certs - secret: - secretName: metrics-server-cert - {{- end }} - {{- end }} diff --git a/operator/deploy/chart/templates/metrics/metrics-service.yaml b/operator/deploy/chart/templates/metrics/metrics-service.yaml deleted file mode 100644 index a91cc04a..00000000 --- a/operator/deploy/chart/templates/metrics/metrics-service.yaml +++ /dev/null @@ -1,17 +0,0 @@ -{{- if .Values.metrics.enable }} -apiVersion: v1 -kind: Service -metadata: - name: operator-controller-manager-metrics-service - namespace: {{ .Release.Namespace }} - labels: - {{- include "chart.labels" . 
| nindent 4 }} -spec: - ports: - - port: 8443 - targetPort: 8443 - protocol: TCP - name: https - selector: - control-plane: controller-manager -{{- end }} diff --git a/operator/deploy/chart/templates/namespace/namespace.yaml b/operator/deploy/chart/templates/namespace/namespace.yaml deleted file mode 100644 index 3b40a0cb..00000000 --- a/operator/deploy/chart/templates/namespace/namespace.yaml +++ /dev/null @@ -1,6 +0,0 @@ -{{- if .Values.createNamespace }} -apiVersion: v1 -kind: Namespace -metadata: - name: {{ .Release.Namespace }} -{{- end -}} \ No newline at end of file diff --git a/operator/deploy/chart/templates/network-policy/allow-metrics-traffic.yaml b/operator/deploy/chart/templates/network-policy/allow-metrics-traffic.yaml deleted file mode 100755 index 9f392cf9..00000000 --- a/operator/deploy/chart/templates/network-policy/allow-metrics-traffic.yaml +++ /dev/null @@ -1,28 +0,0 @@ -{{- if .Values.networkPolicy.enable }} -# This NetworkPolicy allows ingress traffic -# with Pods running on namespaces labeled with 'metrics: enabled'. Only Pods on those -# namespaces are able to gather data from the metrics endpoint. -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - labels: - {{- include "chart.labels" . 
| nindent 4 }} - name: allow-metrics-traffic - namespace: {{ .Release.Namespace }} -spec: - podSelector: - matchLabels: - control-plane: controller-manager - app.kubernetes.io/name: operator - policyTypes: - - Ingress - ingress: - # This allows ingress traffic from any namespace with the label metrics: enabled - - from: - - namespaceSelector: - matchLabels: - metrics: enabled # Only from namespaces with this label - ports: - - port: 8443 - protocol: TCP -{{- end -}} diff --git a/operator/deploy/chart/templates/network-policy/allow-webhook-traffic.yaml b/operator/deploy/chart/templates/network-policy/allow-webhook-traffic.yaml deleted file mode 100755 index b42e482f..00000000 --- a/operator/deploy/chart/templates/network-policy/allow-webhook-traffic.yaml +++ /dev/null @@ -1,28 +0,0 @@ -{{- if .Values.networkPolicy.enable }} -# This NetworkPolicy allows ingress traffic to your webhook server running -# as part of the controller-manager from specific namespaces and pods. CR(s) which uses webhooks -# will only work when applied in namespaces labeled with 'webhook: enabled' -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - labels: - {{- include "chart.labels" . | nindent 4 }} - name: allow-webhook-traffic - namespace: {{ .Release.Namespace }} -spec: - podSelector: - matchLabels: - control-plane: controller-manager - app.kubernetes.io/name: operator - policyTypes: - - Ingress - ingress: - # This allows ingress traffic from any namespace with the label webhook: enabled - - from: - - namespaceSelector: - matchLabels: - webhook: enabled # Only from namespaces with this label - ports: - - port: 443 - protocol: TCP -{{- end -}} diff --git a/operator/deploy/chart/templates/prometheus/monitor.yaml b/operator/deploy/chart/templates/prometheus/monitor.yaml deleted file mode 100644 index c0bbc922..00000000 --- a/operator/deploy/chart/templates/prometheus/monitor.yaml +++ /dev/null @@ -1,39 +0,0 @@ -# To integrate with Prometheus. 
-{{- if .Values.prometheus.enable }} -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - labels: - {{- include "chart.labels" . | nindent 4 }} - name: fs-operator-controller-manager-metrics-monitor - namespace: {{ .Release.Namespace }} -spec: - endpoints: - - path: /metrics - port: https - scheme: https - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token - tlsConfig: - {{- if .Values.certmanager.enable }} - serverName: operator-controller-manager-metrics-service.{{ .Release.Namespace }}.svc - # Apply secure TLS configuration with cert-manager - insecureSkipVerify: false - ca: - secret: - name: metrics-server-cert - key: ca.crt - cert: - secret: - name: metrics-server-cert - key: tls.crt - keySecret: - name: metrics-server-cert - key: tls.key - {{- else }} - # Development/Test mode (insecure configuration) - insecureSkipVerify: true - {{- end }} - selector: - matchLabels: - control-plane: controller-manager -{{- end }} diff --git a/operator/deploy/chart/templates/pulsar/service.yaml b/operator/deploy/chart/templates/pulsar/service.yaml deleted file mode 100644 index a460cbc2..00000000 --- a/operator/deploy/chart/templates/pulsar/service.yaml +++ /dev/null @@ -1,24 +0,0 @@ -{{- if .Values.pulsar.standalone.enable }} -apiVersion: v1 -kind: Service -metadata: - name: {{ .Release.Name }}-pulsar-standalone - namespace: {{ .Release.Namespace }} - labels: - {{- include "chart.labels" . | nindent 4 }} - app: pulsar-standalone -spec: - type: {{ .Values.pulsar.standalone.service.type }} - ports: - - name: pulsar - port: {{ .Values.pulsar.standalone.service.ports.pulsar }} - targetPort: 6650 - protocol: TCP - - name: admin - port: {{ .Values.pulsar.standalone.service.ports.admin }} - targetPort: 8080 - protocol: TCP - selector: - {{- include "chart.selectorLabels" . 
| nindent 4 }} - app: pulsar-standalone -{{- end }} \ No newline at end of file diff --git a/operator/deploy/chart/templates/pulsar/statefulset.yaml b/operator/deploy/chart/templates/pulsar/statefulset.yaml deleted file mode 100644 index a7f7ef85..00000000 --- a/operator/deploy/chart/templates/pulsar/statefulset.yaml +++ /dev/null @@ -1,77 +0,0 @@ -{{- if .Values.pulsar.standalone.enable }} -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: {{ .Release.Name }}-pulsar-standalone - namespace: {{ .Release.Namespace }} - labels: - {{- include "chart.labels" . | nindent 4 }} - app: pulsar-standalone - app.kubernetes.io/component: messaging -spec: - serviceName: {{ .Release.Name }}-pulsar-standalone - replicas: 1 - selector: - matchLabels: - {{- include "chart.selectorLabels" . | nindent 6 }} - app: pulsar-standalone - template: - metadata: - labels: - {{- include "chart.labels" . | nindent 8 }} - app: pulsar-standalone - app.kubernetes.io/component: messaging - spec: - containers: - - name: pulsar - image: {{ .Values.pulsar.standalone.image.repository }}:{{ .Values.pulsar.standalone.image.tag }} - command: - - sh - - -c - - | - # Initialize Pulsar standalone - bin/pulsar standalone -nfw -nss - ports: - - name: pulsar - containerPort: 6650 - protocol: TCP - - name: admin - containerPort: 8080 - protocol: TCP - resources: - {{- toYaml .Values.pulsar.standalone.resources | nindent 12 }} - {{- if .Values.pulsar.standalone.storage.persistence.enabled }} - volumeMounts: - - name: pulsar-data - mountPath: /pulsar/data - {{- end }} - livenessProbe: - httpGet: - path: /admin/v2/brokers/health - port: 8080 - initialDelaySeconds: 60 - periodSeconds: 30 - timeoutSeconds: 5 - failureThreshold: 3 - readinessProbe: - httpGet: - path: /admin/v2/brokers/health - port: 8080 - initialDelaySeconds: 30 - periodSeconds: 10 - timeoutSeconds: 3 - failureThreshold: 3 - {{- if .Values.pulsar.standalone.storage.persistence.enabled }} - volumeClaimTemplates: - - metadata: - name: 
pulsar-data - spec: - accessModes: ["ReadWriteOnce"] - {{- if .Values.pulsar.standalone.storage.storageClass }} - storageClassName: {{ .Values.pulsar.standalone.storage.storageClass }} - {{- end }} - resources: - requests: - storage: {{ .Values.pulsar.standalone.storage.size }} - {{- end }} -{{- end }} \ No newline at end of file diff --git a/operator/deploy/chart/templates/rbac/function_admin_role.yaml b/operator/deploy/chart/templates/rbac/function_admin_role.yaml deleted file mode 100755 index a8075cfd..00000000 --- a/operator/deploy/chart/templates/rbac/function_admin_role.yaml +++ /dev/null @@ -1,28 +0,0 @@ -{{- if .Values.rbac.enable }} -# This rule is not used by the project operator itself. -# It is provided to allow the cluster admin to help manage permissions for users. -# -# Grants full permissions ('*') over fs.functionstream.github.io. -# This role is intended for users authorized to modify roles and bindings within the cluster, -# enabling them to delegate specific permissions to other users or groups as needed. - -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - {{- include "chart.labels" . | nindent 4 }} - name: function-admin-role -rules: -- apiGroups: - - fs.functionstream.github.io - resources: - - functions - verbs: - - '*' -- apiGroups: - - fs.functionstream.github.io - resources: - - functions/status - verbs: - - get -{{- end -}} diff --git a/operator/deploy/chart/templates/rbac/function_editor_role.yaml b/operator/deploy/chart/templates/rbac/function_editor_role.yaml deleted file mode 100755 index c0d80285..00000000 --- a/operator/deploy/chart/templates/rbac/function_editor_role.yaml +++ /dev/null @@ -1,34 +0,0 @@ -{{- if .Values.rbac.enable }} -# This rule is not used by the project operator itself. -# It is provided to allow the cluster admin to help manage permissions for users. -# -# Grants permissions to create, update, and delete resources within the fs.functionstream.github.io. 
-# This role is intended for users who need to manage these resources -# but should not control RBAC or manage permissions for others. - -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - {{- include "chart.labels" . | nindent 4 }} - name: function-editor-role -rules: -- apiGroups: - - fs.functionstream.github.io - resources: - - functions - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - fs.functionstream.github.io - resources: - - functions/status - verbs: - - get -{{- end -}} diff --git a/operator/deploy/chart/templates/rbac/function_viewer_role.yaml b/operator/deploy/chart/templates/rbac/function_viewer_role.yaml deleted file mode 100755 index e488bf97..00000000 --- a/operator/deploy/chart/templates/rbac/function_viewer_role.yaml +++ /dev/null @@ -1,30 +0,0 @@ -{{- if .Values.rbac.enable }} -# This rule is not used by the project operator itself. -# It is provided to allow the cluster admin to help manage permissions for users. -# -# Grants read-only access to fs.functionstream.github.io resources. -# This role is intended for users who need visibility into these resources -# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing. - -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - {{- include "chart.labels" . 
| nindent 4 }} - name: function-viewer-role -rules: -- apiGroups: - - fs.functionstream.github.io - resources: - - functions - verbs: - - get - - list - - watch -- apiGroups: - - fs.functionstream.github.io - resources: - - functions/status - verbs: - - get -{{- end -}} diff --git a/operator/deploy/chart/templates/rbac/leader_election_role.yaml b/operator/deploy/chart/templates/rbac/leader_election_role.yaml deleted file mode 100755 index e28d0924..00000000 --- a/operator/deploy/chart/templates/rbac/leader_election_role.yaml +++ /dev/null @@ -1,42 +0,0 @@ -{{- if .Values.rbac.enable }} -# permissions to do leader election. -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - labels: - {{- include "chart.labels" . | nindent 4 }} - namespace: {{ .Release.Namespace }} - name: fs-operator-leader-election-role -rules: -- apiGroups: - - "" - resources: - - configmaps - verbs: - - get - - list - - watch - - create - - update - - patch - - delete -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - get - - list - - watch - - create - - update - - patch - - delete -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch -{{- end -}} diff --git a/operator/deploy/chart/templates/rbac/leader_election_role_binding.yaml b/operator/deploy/chart/templates/rbac/leader_election_role_binding.yaml deleted file mode 100755 index 87a4593c..00000000 --- a/operator/deploy/chart/templates/rbac/leader_election_role_binding.yaml +++ /dev/null @@ -1,17 +0,0 @@ -{{- if .Values.rbac.enable }} -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - labels: - {{- include "chart.labels" . 
| nindent 4 }} - namespace: {{ .Release.Namespace }} - name: {{ .Release.Name }}-fs-operator-leader-election-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: fs-operator-leader-election-role -subjects: -- kind: ServiceAccount - name: {{ .Values.controllerManager.serviceAccountName }} - namespace: {{ .Release.Namespace }} -{{- end -}} diff --git a/operator/deploy/chart/templates/rbac/metrics_auth_role.yaml b/operator/deploy/chart/templates/rbac/metrics_auth_role.yaml deleted file mode 100755 index 1458716f..00000000 --- a/operator/deploy/chart/templates/rbac/metrics_auth_role.yaml +++ /dev/null @@ -1,21 +0,0 @@ -{{- if and .Values.rbac.enable .Values.metrics.enable }} -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - {{- include "chart.labels" . | nindent 4 }} - name: fs-operator-metrics-auth-role -rules: -- apiGroups: - - authentication.k8s.io - resources: - - tokenreviews - verbs: - - create -- apiGroups: - - authorization.k8s.io - resources: - - subjectaccessreviews - verbs: - - create -{{- end -}} diff --git a/operator/deploy/chart/templates/rbac/metrics_auth_role_binding.yaml b/operator/deploy/chart/templates/rbac/metrics_auth_role_binding.yaml deleted file mode 100755 index 2f5fb6ff..00000000 --- a/operator/deploy/chart/templates/rbac/metrics_auth_role_binding.yaml +++ /dev/null @@ -1,16 +0,0 @@ -{{- if and .Values.rbac.enable .Values.metrics.enable }} -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - labels: - {{- include "chart.labels" . 
| nindent 4 }} - name: {{ .Release.Name }}-fs-operator-metrics-auth-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: fs-operator-metrics-auth-role -subjects: -- kind: ServiceAccount - name: {{ .Values.controllerManager.serviceAccountName }} - namespace: {{ .Release.Namespace }} -{{- end -}} diff --git a/operator/deploy/chart/templates/rbac/metrics_reader_role.yaml b/operator/deploy/chart/templates/rbac/metrics_reader_role.yaml deleted file mode 100755 index 495caa48..00000000 --- a/operator/deploy/chart/templates/rbac/metrics_reader_role.yaml +++ /dev/null @@ -1,13 +0,0 @@ -{{- if and .Values.rbac.enable .Values.metrics.enable }} -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - {{- include "chart.labels" . | nindent 4 }} - name: fs-operator-metrics-reader -rules: -- nonResourceURLs: - - "/metrics" - verbs: - - get -{{- end -}} diff --git a/operator/deploy/chart/templates/rbac/packages_admin_role.yaml b/operator/deploy/chart/templates/rbac/packages_admin_role.yaml deleted file mode 100755 index 923a4c35..00000000 --- a/operator/deploy/chart/templates/rbac/packages_admin_role.yaml +++ /dev/null @@ -1,28 +0,0 @@ -{{- if .Values.rbac.enable }} -# This rule is not used by the project operator itself. -# It is provided to allow the cluster admin to help manage permissions for users. -# -# Grants full permissions ('*') over fs.functionstream.github.io. -# This role is intended for users authorized to modify roles and bindings within the cluster, -# enabling them to delegate specific permissions to other users or groups as needed. - -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - {{- include "chart.labels" . 
| nindent 4 }} - name: packages-admin-role -rules: -- apiGroups: - - fs.functionstream.github.io - resources: - - packages - verbs: - - '*' -- apiGroups: - - fs.functionstream.github.io - resources: - - packages/status - verbs: - - get -{{- end -}} diff --git a/operator/deploy/chart/templates/rbac/packages_editor_role.yaml b/operator/deploy/chart/templates/rbac/packages_editor_role.yaml deleted file mode 100755 index 2aec9a1b..00000000 --- a/operator/deploy/chart/templates/rbac/packages_editor_role.yaml +++ /dev/null @@ -1,34 +0,0 @@ -{{- if .Values.rbac.enable }} -# This rule is not used by the project operator itself. -# It is provided to allow the cluster admin to help manage permissions for users. -# -# Grants permissions to create, update, and delete resources within the fs.functionstream.github.io. -# This role is intended for users who need to manage these resources -# but should not control RBAC or manage permissions for others. - -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - {{- include "chart.labels" . | nindent 4 }} - name: packages-editor-role -rules: -- apiGroups: - - fs.functionstream.github.io - resources: - - packages - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - fs.functionstream.github.io - resources: - - packages/status - verbs: - - get -{{- end -}} diff --git a/operator/deploy/chart/templates/rbac/packages_viewer_role.yaml b/operator/deploy/chart/templates/rbac/packages_viewer_role.yaml deleted file mode 100755 index 3c1345ff..00000000 --- a/operator/deploy/chart/templates/rbac/packages_viewer_role.yaml +++ /dev/null @@ -1,30 +0,0 @@ -{{- if .Values.rbac.enable }} -# This rule is not used by the project operator itself. -# It is provided to allow the cluster admin to help manage permissions for users. -# -# Grants read-only access to fs.functionstream.github.io resources. 
-# This role is intended for users who need visibility into these resources -# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing. - -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - {{- include "chart.labels" . | nindent 4 }} - name: packages-viewer-role -rules: -- apiGroups: - - fs.functionstream.github.io - resources: - - packages - verbs: - - get - - list - - watch -- apiGroups: - - fs.functionstream.github.io - resources: - - packages/status - verbs: - - get -{{- end -}} diff --git a/operator/deploy/chart/templates/rbac/role.yaml b/operator/deploy/chart/templates/rbac/role.yaml deleted file mode 100755 index 9ba521c1..00000000 --- a/operator/deploy/chart/templates/rbac/role.yaml +++ /dev/null @@ -1,59 +0,0 @@ -{{- if .Values.rbac.enable }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - {{- include "chart.labels" . | nindent 4 }} - name: fs-operator-manager-role -rules: -- apiGroups: - - apps - resources: - - deployments - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - fs.functionstream.github.io - resources: - - functions - - package - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - fs.functionstream.github.io - resources: - - functions/finalizers - - package/finalizers - verbs: - - update -- apiGroups: - - fs.functionstream.github.io - resources: - - functions/status - - package/status - verbs: - - get - - patch - - update -- apiGroups: - - fs.functionstream.github.io - resources: - - packages - verbs: - - get - - list - - watch -{{- end -}} diff --git a/operator/deploy/chart/templates/rbac/role_binding.yaml b/operator/deploy/chart/templates/rbac/role_binding.yaml deleted file mode 100755 index 2bdd9aa4..00000000 --- a/operator/deploy/chart/templates/rbac/role_binding.yaml +++ /dev/null @@ -1,16 +0,0 @@ -{{- if .Values.rbac.enable }} 
-apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - labels: - {{- include "chart.labels" . | nindent 4 }} - name: {{ .Release.Name }}-fs-operator-manager-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: fs-operator-manager-role -subjects: -- kind: ServiceAccount - name: {{ .Values.controllerManager.serviceAccountName }} - namespace: {{ .Release.Namespace }} -{{- end -}} diff --git a/operator/deploy/chart/templates/rbac/service_account.yaml b/operator/deploy/chart/templates/rbac/service_account.yaml deleted file mode 100755 index 93e0a323..00000000 --- a/operator/deploy/chart/templates/rbac/service_account.yaml +++ /dev/null @@ -1,15 +0,0 @@ -{{- if .Values.rbac.enable }} -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - {{- include "chart.labels" . | nindent 4 }} - {{- if and .Values.controllerManager.serviceAccount .Values.controllerManager.serviceAccount.annotations }} - annotations: - {{- range $key, $value := .Values.controllerManager.serviceAccount.annotations }} - {{ $key }}: {{ $value }} - {{- end }} - {{- end }} - name: {{ .Values.controllerManager.serviceAccountName }} - namespace: {{ .Release.Namespace }} -{{- end -}} diff --git a/operator/deploy/chart/templates/webhook/service.yaml b/operator/deploy/chart/templates/webhook/service.yaml deleted file mode 100644 index 442afa65..00000000 --- a/operator/deploy/chart/templates/webhook/service.yaml +++ /dev/null @@ -1,16 +0,0 @@ -{{- if .Values.webhook.enable }} -apiVersion: v1 -kind: Service -metadata: - name: operator-webhook-service - namespace: {{ .Release.Namespace }} - labels: - {{- include "chart.labels" . 
| nindent 4 }} -spec: - ports: - - port: 443 - protocol: TCP - targetPort: 9443 - selector: - control-plane: controller-manager -{{- end }} diff --git a/operator/deploy/chart/templates/webhook/webhooks.yaml b/operator/deploy/chart/templates/webhook/webhooks.yaml deleted file mode 100644 index aec57d5a..00000000 --- a/operator/deploy/chart/templates/webhook/webhooks.yaml +++ /dev/null @@ -1,109 +0,0 @@ -{{- if .Values.webhook.enable }} -apiVersion: admissionregistration.k8s.io/v1 -kind: MutatingWebhookConfiguration -metadata: - name: operator-mutating-webhook-configuration - namespace: {{ .Release.Namespace }} - annotations: - {{- if .Values.certmanager.enable }} - cert-manager.io/inject-ca-from: "{{ $.Release.Namespace }}/serving-cert" - {{- end }} - labels: - {{- include "chart.labels" . | nindent 4 }} -webhooks: - - name: mfunction-v1alpha1.kb.io - clientConfig: - service: - name: operator-webhook-service - namespace: {{ .Release.Namespace }} - path: /mutate-fs-functionstream-github-io-v1alpha1-function - failurePolicy: Fail - sideEffects: None - admissionReviewVersions: - - v1 - rules: - - operations: - - CREATE - - UPDATE - apiGroups: - - fs.functionstream.github.io - apiVersions: - - v1alpha1 - resources: - - functions - - name: mpackage-v1alpha1.kb.io - clientConfig: - service: - name: operator-webhook-service - namespace: {{ .Release.Namespace }} - path: /mutate-fs-functionstream-github-io-v1alpha1-package - failurePolicy: Fail - sideEffects: None - admissionReviewVersions: - - v1 - rules: - - operations: - - CREATE - - UPDATE - apiGroups: - - fs.functionstream.github.io - apiVersions: - - v1alpha1 - resources: - - packages ---- -apiVersion: admissionregistration.k8s.io/v1 -kind: ValidatingWebhookConfiguration -metadata: - name: operator-validating-webhook-configuration - namespace: {{ .Release.Namespace }} - annotations: - {{- if .Values.certmanager.enable }} - cert-manager.io/inject-ca-from: "{{ $.Release.Namespace }}/serving-cert" - {{- end }} - labels: - 
{{- include "chart.labels" . | nindent 4 }} -webhooks: - - name: vfunction-v1alpha1.kb.io - clientConfig: - service: - name: operator-webhook-service - namespace: {{ .Release.Namespace }} - path: /validate-fs-functionstream-github-io-v1alpha1-function - failurePolicy: Fail - sideEffects: None - admissionReviewVersions: - - v1 - rules: - - operations: - - CREATE - - UPDATE - - DELETE - apiGroups: - - fs.functionstream.github.io - apiVersions: - - v1alpha1 - resources: - - functions - - name: vpackage-v1alpha1.kb.io - clientConfig: - service: - name: operator-webhook-service - namespace: {{ .Release.Namespace }} - path: /validate-fs-functionstream-github-io-v1alpha1-package - failurePolicy: Fail - sideEffects: None - admissionReviewVersions: - - v1 - rules: - - operations: - - CREATE - - UPDATE - - DELETE - apiGroups: - - fs.functionstream.github.io - apiVersions: - - v1alpha1 - resources: - - packages -{{- end }} diff --git a/operator/deploy/chart/values.yaml b/operator/deploy/chart/values.yaml deleted file mode 100644 index a7756d8e..00000000 --- a/operator/deploy/chart/values.yaml +++ /dev/null @@ -1,114 +0,0 @@ -# [MANAGER]: Manager Deployment Configurations -controllerManager: - replicas: 1 - container: - image: - repository: functionstream/operator - tag: latest - imagePullPolicy: Always - args: - - "--leader-elect" - - "--metrics-bind-address=:8443" - - "--health-probe-bind-address=:8081" - resources: - limits: - cpu: 500m - memory: 128Mi - requests: - cpu: 10m - memory: 64Mi - livenessProbe: - initialDelaySeconds: 15 - periodSeconds: 20 - httpGet: - path: /healthz - port: 8081 - readinessProbe: - initialDelaySeconds: 5 - periodSeconds: 10 - httpGet: - path: /readyz - port: 8081 - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - "ALL" - securityContext: - runAsNonRoot: true - seccompProfile: - type: RuntimeDefault - terminationGracePeriodSeconds: 10 - serviceAccountName: functionstream-operator - -createNamespace: false - -# 
[RBAC]: To enable RBAC (Permissions) configurations -rbac: - enable: true - -# [CRDs]: To enable the CRDs -crd: - # This option determines whether the CRDs are included - # in the installation process. - enable: true - - # Enabling this option adds the "helm.sh/resource-policy": keep - # annotation to the CRD, ensuring it remains installed even when - # the Helm release is uninstalled. - # NOTE: Removing the CRDs will also remove all cert-manager CR(s) - # (Certificates, Issuers, ...) due to garbage collection. - keep: true - -# [METRICS]: Set to true to generate manifests for exporting metrics. -# To disable metrics export set false, and ensure that the -# ControllerManager argument "--metrics-bind-address=:8443" is removed. -metrics: - enable: true - -# [WEBHOOKS]: Webhooks configuration -# The following configuration is automatically generated from the manifests -# generated by controller-gen. To update run 'make manifests' and -# the edit command with the '--force' flag -webhook: - enable: true - -# [PROMETHEUS]: To enable a ServiceMonitor to export metrics to Prometheus set true -prometheus: - enable: false - -# [CERT-MANAGER]: To enable cert-manager injection to webhooks set true -certmanager: - enable: true - -# [NETWORK POLICIES]: To enable NetworkPolicies set true -networkPolicy: - enable: false - -# [PULSAR]: Pulsar configuration -pulsar: - # Enable Pulsar standalone cluster deployment - standalone: - enable: false - image: - repository: apachepulsar/pulsar - tag: "latest" - resources: - requests: - cpu: 500m - memory: 1Gi - storage: - # Enable persistence for Pulsar data - persistence: - enabled: false - size: 10Gi - storageClass: "" - service: - type: ClusterIP - ports: - pulsar: 6650 - admin: 8080 - # External Pulsar cluster configuration (used when standalone.enable is false) - serviceUrl: pulsar://your-pulsar-cluster:6650 - authPlugin: "" - authParams: "" diff --git a/operator/go.mod b/operator/go.mod deleted file mode 100644 index 849a861d..00000000 
--- a/operator/go.mod +++ /dev/null @@ -1,101 +0,0 @@ -module github.com/FunctionStream/function-stream/operator - -go 1.23.0 - -godebug default=go1.23 - -require ( - github.com/go-logr/logr v1.4.2 - github.com/onsi/ginkgo/v2 v2.22.0 - github.com/onsi/gomega v1.36.1 - gopkg.in/yaml.v3 v3.0.1 - k8s.io/api v0.32.1 - k8s.io/apiextensions-apiserver v0.32.1 - k8s.io/apimachinery v0.32.1 - k8s.io/client-go v0.32.1 - sigs.k8s.io/controller-runtime v0.20.4 -) - -require ( - cel.dev/expr v0.18.0 // indirect - github.com/antlr4-go/antlr/v4 v4.13.0 // indirect - github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 // indirect - github.com/beorn7/perks v1.0.1 // indirect - github.com/blang/semver/v4 v4.0.0 // indirect - github.com/cenkalti/backoff/v4 v4.3.0 // indirect - github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/emicklei/go-restful/v3 v3.11.0 // indirect - github.com/evanphx/json-patch v5.6.0+incompatible // indirect - github.com/evanphx/json-patch/v5 v5.9.11 // indirect - github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/fxamacker/cbor/v2 v2.7.0 // indirect - github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-logr/zapr v1.3.0 // indirect - github.com/go-openapi/jsonpointer v0.21.0 // indirect - github.com/go-openapi/jsonreference v0.20.2 // indirect - github.com/go-openapi/swag v0.23.0 // indirect - github.com/go-task/slim-sprig/v3 v3.0.0 // indirect - github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.4 // indirect - github.com/google/btree v1.1.3 // indirect - github.com/google/cel-go v0.22.0 // indirect - github.com/google/gnostic-models v0.6.8 // indirect - github.com/google/go-cmp v0.6.0 // indirect - github.com/google/gofuzz v1.2.0 // indirect - github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect - github.com/google/uuid v1.6.0 // indirect - 
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect - github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/josharian/intern v1.0.0 // indirect - github.com/json-iterator/go v1.1.12 // indirect - github.com/mailru/easyjson v0.7.7 // indirect - github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/pkg/errors v0.9.1 // indirect - github.com/prometheus/client_golang v1.19.1 // indirect - github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.55.0 // indirect - github.com/prometheus/procfs v0.15.1 // indirect - github.com/spf13/cobra v1.8.1 // indirect - github.com/spf13/pflag v1.0.5 // indirect - github.com/stoewer/go-strcase v1.3.0 // indirect - github.com/x448/float16 v0.8.4 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect - go.opentelemetry.io/otel v1.28.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect - go.opentelemetry.io/otel/metric v1.28.0 // indirect - go.opentelemetry.io/otel/sdk v1.28.0 // indirect - go.opentelemetry.io/otel/trace v1.28.0 // indirect - go.opentelemetry.io/proto/otlp v1.3.1 // indirect - go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect - golang.org/x/net v0.30.0 // indirect - golang.org/x/oauth2 v0.23.0 // indirect - golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.26.0 // indirect - golang.org/x/term v0.25.0 // indirect - golang.org/x/text v0.19.0 // indirect - golang.org/x/time v0.7.0 // indirect - golang.org/x/tools v0.26.0 // indirect - gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/genproto/googleapis/api 
v0.0.0-20240826202546-f6391c0de4c7 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect - google.golang.org/grpc v1.65.0 // indirect - google.golang.org/protobuf v1.35.1 // indirect - gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect - gopkg.in/inf.v0 v0.9.1 // indirect - k8s.io/apiserver v0.32.1 // indirect - k8s.io/component-base v0.32.1 // indirect - k8s.io/klog/v2 v2.130.1 // indirect - k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect - k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect - sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect - sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect - sigs.k8s.io/yaml v1.4.0 // indirect -) diff --git a/operator/go.sum b/operator/go.sum deleted file mode 100644 index db9edd2e..00000000 --- a/operator/go.sum +++ /dev/null @@ -1,247 +0,0 @@ -cel.dev/expr v0.18.0 h1:CJ6drgk+Hf96lkLikr4rFf19WrU0BOWEihyZnI2TAzo= -cel.dev/expr v0.18.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= -github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI= -github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g= -github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 h1:4daAzAu0S6Vi7/lbWECcX0j45yZReDZ56BQsrVBOEEY= -github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= -github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= -github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= -github.com/cenkalti/backoff/v4 
v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= -github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= -github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= -github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= -github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= -github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= -github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= -github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= -github.com/go-logr/logr v1.2.2/go.mod 
h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= -github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= -github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= -github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= -github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= -github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= -github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= -github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= -github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= -github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= -github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= -github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= -github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= 
-github.com/google/cel-go v0.22.0 h1:b3FJZxpiv1vTMo2/5RDUqAHPxkT8mmMfJIrq1llbf7g= -github.com/google/cel-go v0.22.0/go.mod h1:BuznPXXfQDpXKWQ9sPW3TzlAJN5zzFe+i9tIs0yC4s8= -github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= -github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= -github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= -github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= -github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= -github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= -github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= -github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= -github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= -github.com/json-iterator/go v1.1.12/go.mod 
h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= -github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg= -github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= -github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw= 
-github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= -github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= -github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= -github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= -github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= -github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stoewer/go-strcase v1.3.0 
h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= -github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= -github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= -go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= -go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 
h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= -go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= -go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= -go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= -go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= -go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= -go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= -go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= -go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= -go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= -go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= -go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= -go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 
-golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= -golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= -golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= -golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= -golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= -golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= -golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= -golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= -gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 
h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= -google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= -google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= -gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= -gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= -gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.32.1 h1:f562zw9cy+GvXzXf0CKlVQ7yHJVYzLfL6JAS4kOAaOc= -k8s.io/api v0.32.1/go.mod h1:/Yi/BqkuueW1BgpoePYBRdDYfjPF5sgTr5+YqDZra5k= -k8s.io/apiextensions-apiserver v0.32.1 h1:hjkALhRUeCariC8DiVmb5jj0VjIc1N0DREP32+6UXZw= -k8s.io/apiextensions-apiserver v0.32.1/go.mod h1:sxWIGuGiYov7Io1fAS2X06NjMIk5CbRHc2StSmbaQto= -k8s.io/apimachinery v0.32.1 h1:683ENpaCBjma4CYqsmZyhEzrGz6cjn1MY/X2jB2hkZs= -k8s.io/apimachinery v0.32.1/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= -k8s.io/apiserver v0.32.1 h1:oo0OozRos66WFq87Zc5tclUX2r0mymoVHRq8JmR7Aak= 
-k8s.io/apiserver v0.32.1/go.mod h1:UcB9tWjBY7aryeI5zAgzVJB/6k7E97bkr1RgqDz0jPw= -k8s.io/client-go v0.32.1 h1:otM0AxdhdBIaQh7l1Q0jQpmo7WOFIk5FFa4bg6YMdUU= -k8s.io/client-go v0.32.1/go.mod h1:aTTKZY7MdxUaJ/KiUs8D+GssR9zJZi77ZqtzcGXIiDg= -k8s.io/component-base v0.32.1 h1:/5IfJ0dHIKBWysGV0yKTFfacZ5yNV1sulPh3ilJjRZk= -k8s.io/component-base v0.32.1/go.mod h1:j1iMMHi/sqAHeG5z+O9BFNCF698a1u0186zkjMZQ28w= -k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= -k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= -k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4= -k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= -k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 h1:CPT0ExVicCzcpeN4baWEV2ko2Z/AsiZgEdwgcfwLgMo= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= -sigs.k8s.io/controller-runtime v0.20.4 h1:X3c+Odnxz+iPTRobG4tp092+CvBU9UK0t/bRf+n0DGU= -sigs.k8s.io/controller-runtime v0.20.4/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY= -sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= -sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= -sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= -sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= -sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= -sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/operator/hack/boilerplate.go.txt 
b/operator/hack/boilerplate.go.txt deleted file mode 100644 index 221dcbe0..00000000 --- a/operator/hack/boilerplate.go.txt +++ /dev/null @@ -1,15 +0,0 @@ -/* -Copyright 2025. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ \ No newline at end of file diff --git a/operator/hack/helm.patch b/operator/hack/helm.patch deleted file mode 100644 index fab1f6fb..00000000 --- a/operator/hack/helm.patch +++ /dev/null @@ -1,303 +0,0 @@ -diff --git a/dist/chart/Chart.yaml b/deploy/chart/Chart.yaml -index 221f200..2eac6b8 100644 ---- a/dist/chart/Chart.yaml -+++ b/deploy/chart/Chart.yaml -@@ -1,7 +1,19 @@ - apiVersion: v2 - name: operator --description: A Helm chart to distribute the project operator -+description: A Helm chart to deploy the FunctionStream operator on Kubernetes. 
- type: application - version: 0.1.0 - appVersion: "0.1.0" --icon: "https://example.com/icon.png" -+home: "https://github.com/FunctionStream/function-stream" -+sources: -+ - "https://github.com/FunctionStream/function-stream/operator" -+maintainers: -+ - name: Zike Yang -+ email: zike@apache.org -+keywords: -+ - serverless -+ - streaming -+ - functionstream -+ - operators -+annotations: -+ category: "Operators" -diff --git a/dist/chart/templates/manager/manager.yaml b/deploy/chart/templates/manager/manager.yaml -index 7f6c891..ce6c11d 100644 ---- a/dist/chart/templates/manager/manager.yaml -+++ b/deploy/chart/templates/manager/manager.yaml -@@ -34,13 +34,29 @@ spec: - command: - - /manager - image: {{ .Values.controllerManager.container.image.repository }}:{{ .Values.controllerManager.container.image.tag }} -- {{- if .Values.controllerManager.container.env }} -+ imagePullPolicy: {{ .Values.controllerManager.container.imagePullPolicy }} - env: -+ {{- if .Values.pulsar.standalone.enable }} -+ - name: PULSAR_SERVICE_URL -+ value: pulsar://{{ .Release.Name }}-pulsar-standalone.{{ .Release.Namespace }}.svc.cluster.local:6650 -+ {{- else if .Values.pulsar.serviceUrl }} -+ - name: PULSAR_SERVICE_URL -+ value: {{ .Values.pulsar.serviceUrl }} -+ {{- end }} -+ {{- if .Values.pulsar.authPlugin }} -+ - name: PULSAR_AUTH_PLUGIN -+ value: {{ .Values.pulsar.authPlugin }} -+ {{- end }} -+ {{- if .Values.pulsar.authParams }} -+ - name: PULSAR_AUTH_PARAMS -+ value: {{ .Values.pulsar.authParams }} -+ {{- end }} -+ {{- if .Values.controllerManager.container.env }} - {{- range $key, $value := .Values.controllerManager.container.env }} - - name: {{ $key }} - value: {{ $value }} - {{- end }} -- {{- end }} -+ {{- end }} - livenessProbe: - {{- toYaml .Values.controllerManager.container.livenessProbe | nindent 12 }} - readinessProbe: -diff --git a/deploy/chart/templates/pulsar/service.yaml b/deploy/chart/templates/pulsar/service.yaml -new file mode 100644 -index 0000000..a460cbc ---- 
/dev/null -+++ b/deploy/chart/templates/pulsar/service.yaml -@@ -0,0 +1,24 @@ -+{{- if .Values.pulsar.standalone.enable }} -+apiVersion: v1 -+kind: Service -+metadata: -+ name: {{ .Release.Name }}-pulsar-standalone -+ namespace: {{ .Release.Namespace }} -+ labels: -+ {{- include "chart.labels" . | nindent 4 }} -+ app: pulsar-standalone -+spec: -+ type: {{ .Values.pulsar.standalone.service.type }} -+ ports: -+ - name: pulsar -+ port: {{ .Values.pulsar.standalone.service.ports.pulsar }} -+ targetPort: 6650 -+ protocol: TCP -+ - name: admin -+ port: {{ .Values.pulsar.standalone.service.ports.admin }} -+ targetPort: 8080 -+ protocol: TCP -+ selector: -+ {{- include "chart.selectorLabels" . | nindent 4 }} -+ app: pulsar-standalone -+{{- end }} -\ No newline at end of file -diff --git a/deploy/chart/templates/pulsar/statefulset.yaml b/deploy/chart/templates/pulsar/statefulset.yaml -new file mode 100644 -index 0000000..a7f7ef8 ---- /dev/null -+++ b/deploy/chart/templates/pulsar/statefulset.yaml -@@ -0,0 +1,77 @@ -+{{- if .Values.pulsar.standalone.enable }} -+apiVersion: apps/v1 -+kind: StatefulSet -+metadata: -+ name: {{ .Release.Name }}-pulsar-standalone -+ namespace: {{ .Release.Namespace }} -+ labels: -+ {{- include "chart.labels" . | nindent 4 }} -+ app: pulsar-standalone -+ app.kubernetes.io/component: messaging -+spec: -+ serviceName: {{ .Release.Name }}-pulsar-standalone -+ replicas: 1 -+ selector: -+ matchLabels: -+ {{- include "chart.selectorLabels" . | nindent 6 }} -+ app: pulsar-standalone -+ template: -+ metadata: -+ labels: -+ {{- include "chart.labels" . 
| nindent 8 }} -+ app: pulsar-standalone -+ app.kubernetes.io/component: messaging -+ spec: -+ containers: -+ - name: pulsar -+ image: {{ .Values.pulsar.standalone.image.repository }}:{{ .Values.pulsar.standalone.image.tag }} -+ command: -+ - sh -+ - -c -+ - | -+ # Initialize Pulsar standalone -+ bin/pulsar standalone -nfw -nss -+ ports: -+ - name: pulsar -+ containerPort: 6650 -+ protocol: TCP -+ - name: admin -+ containerPort: 8080 -+ protocol: TCP -+ resources: -+ {{- toYaml .Values.pulsar.standalone.resources | nindent 12 }} -+ {{- if .Values.pulsar.standalone.storage.persistence.enabled }} -+ volumeMounts: -+ - name: pulsar-data -+ mountPath: /pulsar/data -+ {{- end }} -+ livenessProbe: -+ httpGet: -+ path: /admin/v2/brokers/health -+ port: 8080 -+ initialDelaySeconds: 60 -+ periodSeconds: 30 -+ timeoutSeconds: 5 -+ failureThreshold: 3 -+ readinessProbe: -+ httpGet: -+ path: /admin/v2/brokers/health -+ port: 8080 -+ initialDelaySeconds: 30 -+ periodSeconds: 10 -+ timeoutSeconds: 3 -+ failureThreshold: 3 -+ {{- if .Values.pulsar.standalone.storage.persistence.enabled }} -+ volumeClaimTemplates: -+ - metadata: -+ name: pulsar-data -+ spec: -+ accessModes: ["ReadWriteOnce"] -+ {{- if .Values.pulsar.standalone.storage.storageClass }} -+ storageClassName: {{ .Values.pulsar.standalone.storage.storageClass }} -+ {{- end }} -+ resources: -+ requests: -+ storage: {{ .Values.pulsar.standalone.storage.size }} -+ {{- end }} -+{{- end }} -\ No newline at end of file -diff --git a/dist/chart/templates/rbac/metrics_auth_role.yaml b/deploy/chart/templates/rbac/metrics_auth_role.yaml -index b0c7913..decef92 100755 ---- a/dist/chart/templates/rbac/metrics_auth_role.yaml -+++ b/deploy/chart/templates/rbac/metrics_auth_role.yaml -@@ -4,7 +4,7 @@ kind: ClusterRole - metadata: - labels: - {{- include "chart.labels" . 
| nindent 4 }} -- name: operator-metrics-auth-role -+ name: {{ .Release.Name }}-operator-metrics-auth-role - rules: - - apiGroups: - - authentication.k8s.io -diff --git a/dist/chart/templates/rbac/metrics_auth_role_binding.yaml b/deploy/chart/templates/rbac/metrics_auth_role_binding.yaml -index a13f6a6..0172099 100755 ---- a/dist/chart/templates/rbac/metrics_auth_role_binding.yaml -+++ b/deploy/chart/templates/rbac/metrics_auth_role_binding.yaml -@@ -4,7 +4,7 @@ kind: ClusterRoleBinding - metadata: - labels: - {{- include "chart.labels" . | nindent 4 }} -- name: operator-metrics-auth-rolebinding -+ name: {{ .Release.Name }}-operator-metrics-auth-rolebinding - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole -diff --git a/dist/chart/templates/rbac/metrics_reader_role.yaml b/deploy/chart/templates/rbac/metrics_reader_role.yaml -index 1f0a0f5..f5655e7 100755 ---- a/dist/chart/templates/rbac/metrics_reader_role.yaml -+++ b/deploy/chart/templates/rbac/metrics_reader_role.yaml -@@ -4,7 +4,7 @@ kind: ClusterRole - metadata: - labels: - {{- include "chart.labels" . | nindent 4 }} -- name: operator-metrics-reader -+ name: {{ .Release.Name }}-operator-metrics-reader - rules: - - nonResourceURLs: - - "/metrics" -diff --git a/dist/chart/templates/rbac/role.yaml b/deploy/chart/templates/rbac/role.yaml -index 3ae0961..a32998a 100755 ---- a/dist/chart/templates/rbac/role.yaml -+++ b/deploy/chart/templates/rbac/role.yaml -@@ -5,7 +5,7 @@ kind: ClusterRole - metadata: - labels: - {{- include "chart.labels" . | nindent 4 }} -- name: operator-manager-role -+ name: functionstream-operator-manager-role - rules: - - apiGroups: - - apps -diff --git a/dist/chart/templates/rbac/role_binding.yaml b/deploy/chart/templates/rbac/role_binding.yaml -index a4f2cfa..77c8250 100755 ---- a/dist/chart/templates/rbac/role_binding.yaml -+++ b/deploy/chart/templates/rbac/role_binding.yaml -@@ -4,11 +4,11 @@ kind: ClusterRoleBinding - metadata: - labels: - {{- include "chart.labels" . 
| nindent 4 }} -- name: operator-manager-rolebinding -+ name: {{ .Release.Name }}-functionstream-operator-manager-rolebinding - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole -- name: operator-manager-role -+ name: functionstream-operator-manager-role - subjects: - - kind: ServiceAccount - name: {{ .Values.controllerManager.serviceAccountName }} -diff --git a/dist/chart/values.yaml b/deploy/chart/values.yaml -index 9357643..4851d3b 100644 ---- a/dist/chart/values.yaml -+++ b/deploy/chart/values.yaml -@@ -3,8 +3,9 @@ controllerManager: - replicas: 1 - container: - image: -- repository: controller -+ repository: functionstream/operator - tag: latest -+ imagePullPolicy: IfNotPresent - args: - - "--leader-elect" - - "--metrics-bind-address=:8443" -@@ -38,7 +39,7 @@ controllerManager: - seccompProfile: - type: RuntimeDefault - terminationGracePeriodSeconds: 10 -- serviceAccountName: operator-controller-manager -+ serviceAccountName: functionstream-operator - - # [RBAC]: To enable RBAC (Permissions) configurations - rbac: -@@ -81,3 +82,31 @@ certmanager: - # [NETWORK POLICIES]: To enable NetworkPolicies set true - networkPolicy: - enable: false -+ -+# [PULSAR]: Pulsar configuration -+pulsar: -+ # Enable Pulsar standalone cluster deployment -+ standalone: -+ enable: false -+ image: -+ repository: apachepulsar/pulsar -+ tag: "latest" -+ resources: -+ requests: -+ cpu: 500m -+ memory: 1Gi -+ storage: -+ # Enable persistence for Pulsar data -+ persistence: -+ enabled: false -+ size: 10Gi -+ storageClass: "" -+ service: -+ type: ClusterIP -+ ports: -+ pulsar: 6650 -+ admin: 8080 -+ # External Pulsar cluster configuration (used when standalone.enable is false) -+ serviceUrl: pulsar://your-pulsar-cluster:6650 -+ authPlugin: "" -+ authParams: "" diff --git a/operator/internal/controller/function_controller.go b/operator/internal/controller/function_controller.go deleted file mode 100644 index abb82875..00000000 --- 
a/operator/internal/controller/function_controller.go +++ /dev/null @@ -1,346 +0,0 @@ -/* -Copyright 2025. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "context" - "fmt" - "reflect" - - "github.com/FunctionStream/function-stream/operator/utils" - "k8s.io/apimachinery/pkg/util/json" - - "gopkg.in/yaml.v3" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/builder" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/handler" - logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - - fsv1alpha1 "github.com/FunctionStream/function-stream/operator/api/v1alpha1" -) - -// Config holds operator configuration (e.g. 
for messaging systems) -type Config struct { - PulsarServiceURL string - PulsarAuthPlugin string - PulsarAuthParams string -} - -// FunctionReconciler reconciles a Function object -type FunctionReconciler struct { - client.Client - Scheme *runtime.Scheme - Config Config -} - -// +kubebuilder:rbac:groups=fs.functionstream.github.io,resources=functions,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=fs.functionstream.github.io,resources=packages,verbs=get;list;watch -// +kubebuilder:rbac:groups=fs.functionstream.github.io,resources=functions/status,verbs=get;update;patch -// +kubebuilder:rbac:groups=fs.functionstream.github.io,resources=functions/finalizers,verbs=update -// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete - -// Reconcile is part of the main kubernetes reconciliation loop which aims to -// move the current state of the cluster closer to the desired state. -// TODO(user): Modify the Reconcile function to compare the state specified by -// the Function object against the actual cluster state, and then -// perform operations to make the cluster state reflect the state specified by -// the user. -// -// For more details, check Reconcile and its Result here: -// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.20.4/pkg/reconcile -func (r *FunctionReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - log := logf.FromContext(ctx) - log.Info("Reconciling Function", "function", req.NamespacedName) - - // 1. Get Function - var fn fsv1alpha1.Function - if err := r.Get(ctx, req.NamespacedName, &fn); err != nil { - if errors.IsNotFound(err) { - return ctrl.Result{}, nil - } - return ctrl.Result{}, err - } - - // 2. Get package label for later use - packageLabel := generatePackageLabel(&fn) - - // 3. 
Get Package - var pkg fsv1alpha1.Package - packageNamespace := fn.Spec.PackageRef.Namespace - if packageNamespace == "" { - packageNamespace = req.Namespace - } - if err := r.Get(ctx, types.NamespacedName{Name: fn.Spec.PackageRef.Name, Namespace: packageNamespace}, &pkg); err != nil { - return ctrl.Result{}, err - } - image := "" - if pkg.Spec.FunctionType.Cloud != nil { - image = pkg.Spec.FunctionType.Cloud.Image - } - if image == "" { - return ctrl.Result{}, fmt.Errorf("package %s has no image", packageLabel) - } - - // 4. Build config yaml content - configYaml, err := buildFunctionConfigYaml(&fn, r.Config) - if err != nil { - log.Error(err, "Failed to marshal config yaml") - return ctrl.Result{}, err - } - - // 5. Build Deployment - deployName := fmt.Sprintf("function-%s", fn.Name) - var replicas int32 = 1 - if fn.Spec.Replicas != nil { - replicas = *fn.Spec.Replicas - } - labels := map[string]string{ - "function": fn.Name, - } - - // Create init command to write config file - initCommand := fmt.Sprintf(`cat > /config/config.yaml << 'EOF' -%s -EOF -`, configYaml) - - deployment := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: deployName, - Namespace: fn.Namespace, - Labels: labels, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: &replicas, - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"function": fn.Name}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: labels, - }, - Spec: corev1.PodSpec{ - InitContainers: []corev1.Container{{ - Name: "init-config", - Image: image, - Command: []string{"/bin/sh", "-c", initCommand}, - VolumeMounts: []corev1.VolumeMount{{ - Name: "function-config", - MountPath: "/config", - }}, - }}, - Containers: []corev1.Container{{ - Name: "function", - Image: image, - VolumeMounts: []corev1.VolumeMount{{ - Name: "function-config", - MountPath: "/config", - }}, - Env: []corev1.EnvVar{{ - Name: "FS_CONFIG_PATH", - Value: "/config/config.yaml", - }}, - }}, - Volumes: 
[]corev1.Volume{{ - Name: "function-config", - VolumeSource: corev1.VolumeSource{ - EmptyDir: &corev1.EmptyDirVolumeSource{}, - }, - }}, - }, - }, - }, - } - if err := ctrl.SetControllerReference(&fn, deployment, r.Scheme); err != nil { - return ctrl.Result{}, err - } - - // 6. Create or Update Deployment - var existingDeploy appsv1.Deployment - deployErr := r.Get(ctx, types.NamespacedName{Name: deployName, Namespace: fn.Namespace}, &existingDeploy) - if deployErr == nil { - // Only update if spec or labels changed - if !reflect.DeepEqual(existingDeploy.Spec, deployment.Spec) || - !reflect.DeepEqual(existingDeploy.Labels, deployment.Labels) { - existingDeploy.Spec = deployment.Spec - existingDeploy.Labels = deployment.Labels - err = r.Update(ctx, &existingDeploy) - if err != nil { - return utils.HandleReconcileError(log, err, "Conflict when updating Deployment, will retry automatically") - } - } - } else if errors.IsNotFound(deployErr) { - err = r.Create(ctx, deployment) - if err != nil { - return utils.HandleReconcileError(log, err, "Conflict when creating Deployment, will retry automatically") - } - } else { - return ctrl.Result{}, deployErr - } - - // 7. 
Update Function Status from Deployment Status - if err := r.Get(ctx, types.NamespacedName{Name: deployName, Namespace: fn.Namespace}, &existingDeploy); err == nil { - fn.Status = convertDeploymentStatusToFunctionStatus(&existingDeploy.Status) - if err := r.Status().Update(ctx, &fn); err != nil { - return utils.HandleReconcileError(log, err, "Conflict when updating Function status, will retry automatically") - } - } - - return ctrl.Result{}, nil -} - -// buildFunctionConfigYaml builds the config.yaml content for the function -func buildFunctionConfigYaml(fn *fsv1alpha1.Function, operatorCfg Config) (string, error) { - cfg := map[string]interface{}{} - - // Inject pulsar config from operator config - cfg["pulsar"] = map[string]interface{}{ - "serviceUrl": operatorCfg.PulsarServiceURL, - "authPlugin": operatorCfg.PulsarAuthPlugin, - "authParams": operatorCfg.PulsarAuthParams, - } - - if len(fn.Spec.Sources) > 0 { - cfg["sources"] = fn.Spec.Sources - } - if fn.Spec.RequestSource != nil { - cfg["requestSource"] = fn.Spec.RequestSource - } - if fn.Spec.SubscriptionName != "" { - cfg["subscriptionName"] = fn.Spec.SubscriptionName - } else { - cfg["subscriptionName"] = fmt.Sprintf("fs-%s", fn.Name) - } - if fn.Spec.Sink != nil { - cfg["sink"] = fn.Spec.Sink - } - if fn.Spec.Module != "" { - cfg["module"] = fn.Spec.Module - } - if fn.Spec.Config != nil { - configMap := make(map[string]interface{}) - for k, v := range fn.Spec.Config { - var r interface{} - if err := json.Unmarshal(v.Raw, &r); err != nil { - return "", fmt.Errorf("failed to unmarshal config value for key %s: %w", k, err) - } - configMap[k] = r - } - cfg["config"] = configMap - } - if fn.Spec.Description != "" { - cfg["description"] = fn.Spec.Description - } - if fn.Spec.DisplayName != "" { - cfg["displayName"] = fn.Spec.DisplayName - } - if fn.Spec.PackageRef.Name != "" { - cfg["package"] = generatePackageLabel(fn) - } - out, err := yaml.Marshal(cfg) - if err != nil { - return "", err - } - return 
string(out), nil -} - -// convertDeploymentStatusToFunctionStatus copies DeploymentStatus fields to FunctionStatus -func convertDeploymentStatusToFunctionStatus(ds *appsv1.DeploymentStatus) fsv1alpha1.FunctionStatus { - return fsv1alpha1.FunctionStatus{ - AvailableReplicas: ds.AvailableReplicas, - ReadyReplicas: ds.ReadyReplicas, - Replicas: ds.Replicas, - UpdatedReplicas: ds.UpdatedReplicas, - ObservedGeneration: ds.ObservedGeneration, - } -} - -func hasFunctionLabel(obj client.Object) bool { - labels := obj.GetLabels() - _, ok := labels["function"] - return ok -} - -// generatePackageLabel generates a package label in the format "{namespace}.{packageName}" -func generatePackageLabel(fn *fsv1alpha1.Function) string { - packageNamespace := fn.Spec.PackageRef.Namespace - if packageNamespace == "" { - packageNamespace = fn.Namespace - } - return fmt.Sprintf("%s.%s", packageNamespace, fn.Spec.PackageRef.Name) -} - -// SetupWithManager sets up the controller with the Manager. -func (r *FunctionReconciler) SetupWithManager(mgr ctrl.Manager) error { - functionLabelPredicate := predicate.NewPredicateFuncs(hasFunctionLabel) - return ctrl.NewControllerManagedBy(mgr). - For(&fsv1alpha1.Function{}). - Owns(&appsv1.Deployment{}, builder.WithPredicates(functionLabelPredicate)). - Watches( - &fsv1alpha1.Package{}, - handler.EnqueueRequestsFromMapFunc(r.mapPackageToFunctions), - ). - Named("function"). 
- Complete(r) -} - -// mapPackageToFunctions maps Package changes to related Functions -func (r *FunctionReconciler) mapPackageToFunctions(ctx context.Context, obj client.Object) []reconcile.Request { - packageObj, ok := obj.(*fsv1alpha1.Package) - if !ok { - return nil - } - - var requests []reconcile.Request - - // Get Functions that reference this Package using the new label format {namespace}.{package name} - packageLabel := fmt.Sprintf("%s.%s", packageObj.Namespace, packageObj.Name) - var functions fsv1alpha1.FunctionList - if err := r.List(ctx, &functions, - client.MatchingLabels(map[string]string{"package": packageLabel})); err != nil { - return nil - } - - for _, function := range functions.Items { - // Check if this function actually references this package - if function.Spec.PackageRef.Name == packageObj.Name { - packageNamespace := function.Spec.PackageRef.Namespace - if packageNamespace == "" { - packageNamespace = function.Namespace - } - if packageNamespace == packageObj.Namespace { - requests = append(requests, reconcile.Request{ - NamespacedName: types.NamespacedName{ - Name: function.Name, - Namespace: function.Namespace, - }, - }) - } - } - } - - return requests -} diff --git a/operator/internal/controller/function_controller_test.go b/operator/internal/controller/function_controller_test.go deleted file mode 100644 index 53a6acca..00000000 --- a/operator/internal/controller/function_controller_test.go +++ /dev/null @@ -1,786 +0,0 @@ -/* -Copyright 2025. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "context" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - fsv1alpha1 "github.com/FunctionStream/function-stream/operator/api/v1alpha1" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -var _ = Describe("Function Controller", func() { - Context("When reconciling a resource", func() { - const resourceName = "test-resource" - - ctx := context.Background() - - typeNamespacedName := types.NamespacedName{ - Name: resourceName, - Namespace: "default", // TODO(user):Modify as needed - } - function := &fsv1alpha1.Function{} - - BeforeEach(func() { - By("creating the custom resource for the Kind Function") - err := k8sClient.Get(ctx, typeNamespacedName, function) - if err != nil && errors.IsNotFound(err) { - resource := &fsv1alpha1.Function{ - ObjectMeta: metav1.ObjectMeta{ - Name: typeNamespacedName.Name, - Namespace: typeNamespacedName.Namespace, - }, - } - Expect(k8sClient.Create(ctx, resource)).To(Succeed()) - } - }) - - AfterEach(func() { - // TODO(user): Cleanup logic after each test, like removing the resource instance. 
- resource := &fsv1alpha1.Function{} - err := k8sClient.Get(ctx, typeNamespacedName, resource) - Expect(err).NotTo(HaveOccurred()) - - By("Cleanup the specific resource instance Function") - Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) - }) - It("should successfully reconcile the resource and create Deployment with init container, and update Status", func() { - By("Reconciling the created resource") - controllerReconciler := &FunctionReconciler{ - Client: k8sClient, - Scheme: k8sClient.Scheme(), - Config: Config{ - PulsarServiceURL: "pulsar://test-broker:6650", - PulsarAuthPlugin: "org.apache.pulsar.client.impl.auth.AuthenticationToken", - PulsarAuthParams: "token:my-token", - }, - } - - // Create a Package resource first - pkg := &fsv1alpha1.Package{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pkg", - Namespace: "default", - }, - Spec: fsv1alpha1.PackageSpec{ - DisplayName: "Test Package", - Description: "desc", - FunctionType: fsv1alpha1.FunctionType{ - Cloud: &fsv1alpha1.CloudType{Image: "busybox:latest"}, - }, - Modules: map[string]fsv1alpha1.Module{}, - }, - } - Expect(k8sClient.Create(ctx, pkg)).To(Succeed()) - - // Re-fetch the latest Function object to ensure the Name field is set - Expect(k8sClient.Get(ctx, typeNamespacedName, function)).To(Succeed()) - - // Patch the Function to reference the Package and fill required fields - patch := client.MergeFrom(function.DeepCopy()) - function.Spec.PackageRef = fsv1alpha1.PackageRef{Name: "test-pkg"} - function.Spec.Module = "mod" - function.Spec.Sink = &fsv1alpha1.SinkSpec{Pulsar: &fsv1alpha1.PulsarSinkSpec{Topic: "out"}} - function.Spec.RequestSource = &fsv1alpha1.SourceSpec{Pulsar: &fsv1alpha1.PulsarSourceSpec{Topic: "in"}} - Expect(k8sClient.Patch(ctx, function, patch)).To(Succeed()) - - _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: typeNamespacedName, - }) - Expect(err).NotTo(HaveOccurred()) - - // Check Deployment - deployName := "function-" + 
typeNamespacedName.Name - deploy := &appsv1.Deployment{} - Expect(k8sClient.Get(ctx, types.NamespacedName{Name: deployName, Namespace: typeNamespacedName.Namespace}, deploy)).To(Succeed()) - - // Verify init container exists and has correct configuration - Expect(deploy.Spec.Template.Spec.InitContainers).To(HaveLen(1)) - initContainer := deploy.Spec.Template.Spec.InitContainers[0] - Expect(initContainer.Name).To(Equal("init-config")) - Expect(initContainer.Image).To(Equal("busybox:latest")) - Expect(initContainer.Command).To(HaveLen(3)) - Expect(initContainer.Command[0]).To(Equal("/bin/sh")) - Expect(initContainer.Command[1]).To(Equal("-c")) - - // Verify the init command contains config.yaml content - initCommand := initContainer.Command[2] - Expect(initCommand).To(ContainSubstring("cat > /config/config.yaml")) - - // Verify main container configuration - Expect(deploy.Spec.Template.Spec.Containers).To(HaveLen(1)) - mainContainer := deploy.Spec.Template.Spec.Containers[0] - Expect(mainContainer.Name).To(Equal("function")) - Expect(mainContainer.Image).To(Equal("busybox:latest")) - - // Verify volume mounts - Expect(mainContainer.VolumeMounts).To(HaveLen(1)) - Expect(mainContainer.VolumeMounts[0].Name).To(Equal("function-config")) - Expect(mainContainer.VolumeMounts[0].MountPath).To(Equal("/config")) - - // Verify environment variable - Expect(mainContainer.Env).To(HaveLen(1)) - Expect(mainContainer.Env[0].Name).To(Equal("FS_CONFIG_PATH")) - Expect(mainContainer.Env[0].Value).To(Equal("/config/config.yaml")) - - // Verify volumes - Expect(deploy.Spec.Template.Spec.Volumes).To(HaveLen(1)) - Expect(deploy.Spec.Template.Spec.Volumes[0].Name).To(Equal("function-config")) - Expect(deploy.Spec.Template.Spec.Volumes[0].EmptyDir).NotTo(BeNil()) - - // Verify labels - Expect(deploy.Labels).To(HaveKey("function")) - Expect(deploy.Labels["function"]).To(Equal(typeNamespacedName.Name)) - - // Simulate Deployment status update - patchDeploy := 
client.MergeFrom(deploy.DeepCopy()) - deploy.Status.AvailableReplicas = 1 - deploy.Status.ReadyReplicas = 1 - deploy.Status.Replicas = 1 - deploy.Status.UpdatedReplicas = 1 - deploy.Status.ObservedGeneration = 2 - Expect(k8sClient.Status().Patch(ctx, deploy, patchDeploy)).To(Succeed()) - - _, err = controllerReconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: typeNamespacedName, - }) - Expect(err).NotTo(HaveOccurred()) - - // Check Function Status - fn := &fsv1alpha1.Function{} - Expect(k8sClient.Get(ctx, typeNamespacedName, fn)).To(Succeed()) - Expect(fn.Status.AvailableReplicas).To(Equal(int32(1))) - Expect(fn.Status.ReadyReplicas).To(Equal(int32(1))) - Expect(fn.Status.Replicas).To(Equal(int32(1))) - Expect(fn.Status.UpdatedReplicas).To(Equal(int32(1))) - Expect(fn.Status.ObservedGeneration).To(Equal(int64(2))) - - // Test deployment update when function spec changes - // Update function spec to trigger deployment update - patchFn := client.MergeFrom(fn.DeepCopy()) - fn.Spec.Description = "Updated description" - Expect(k8sClient.Patch(ctx, fn, patchFn)).To(Succeed()) - - _, err = controllerReconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: typeNamespacedName, - }) - Expect(err).NotTo(HaveOccurred()) - - // Verify deployment was updated - Expect(k8sClient.Get(ctx, types.NamespacedName{Name: deployName, Namespace: typeNamespacedName.Namespace}, deploy)).To(Succeed()) - // The deployment should still exist and be updated - Expect(deploy).NotTo(BeNil()) - }) - - It("should only reconcile when Deployment has 'function' label", func() { - By("setting up a Function and its labeled Deployment") - controllerReconciler := &FunctionReconciler{ - Client: k8sClient, - Scheme: k8sClient.Scheme(), - Config: Config{ - PulsarServiceURL: "pulsar://test-broker:6650", - PulsarAuthPlugin: "org.apache.pulsar.client.impl.auth.AuthenticationToken", - PulsarAuthParams: "token:my-token", - }, - } - - pkg := &fsv1alpha1.Package{ - ObjectMeta: metav1.ObjectMeta{ - 
Name: "test-pkg-label", - Namespace: "default", - }, - Spec: fsv1alpha1.PackageSpec{ - DisplayName: "Test Package", - Description: "desc", - FunctionType: fsv1alpha1.FunctionType{ - Cloud: &fsv1alpha1.CloudType{Image: "busybox:latest"}, - }, - Modules: map[string]fsv1alpha1.Module{}, - }, - } - Expect(k8sClient.Create(ctx, pkg)).To(Succeed()) - - fn := &fsv1alpha1.Function{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-fn-label", - Namespace: "default", - }, - Spec: fsv1alpha1.FunctionSpec{ - PackageRef: fsv1alpha1.PackageRef{Name: "test-pkg-label"}, - Module: "mod", - SubscriptionName: "sub", - Sink: &fsv1alpha1.SinkSpec{Pulsar: &fsv1alpha1.PulsarSinkSpec{Topic: "out"}}, - RequestSource: &fsv1alpha1.SourceSpec{Pulsar: &fsv1alpha1.PulsarSourceSpec{Topic: "in"}}, - }, - } - Expect(k8sClient.Create(ctx, fn)).To(Succeed()) - - // Initial reconcile to create Deployment - _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: types.NamespacedName{Name: fn.Name, Namespace: fn.Namespace}, - }) - Expect(err).NotTo(HaveOccurred()) - - deployName := "function-" + fn.Name - deploy := &appsv1.Deployment{} - Expect(k8sClient.Get(ctx, types.NamespacedName{Name: deployName, Namespace: fn.Namespace}, deploy)).To(Succeed()) - - // Create a Deployment without 'function' label - unlabeledDeploy := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "unlabeled-deploy", - Namespace: fn.Namespace, - Labels: map[string]string{"app": "test"}, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: &[]int32{1}[0], - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": "test"}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"app": "test"}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{{ - Name: "test", - Image: "busybox:latest", - }}, - }, - }, - }, - } - Expect(k8sClient.Create(ctx, unlabeledDeploy)).To(Succeed()) - - // Manually call Reconcile to simulate the event, but 
the hash should not change - _, err = controllerReconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: types.NamespacedName{Name: fn.Name, Namespace: fn.Namespace}, - }) - Expect(err).NotTo(HaveOccurred()) - - // Get Deployment again, the hash should remain unchanged - Expect(k8sClient.Get(ctx, types.NamespacedName{Name: deployName, Namespace: fn.Namespace}, deploy)).To(Succeed()) - }) - - It("should automatically add package label to Function", func() { - By("creating a Function without package label") - controllerReconciler := &FunctionReconciler{ - Client: k8sClient, - Scheme: k8sClient.Scheme(), - Config: Config{ - PulsarServiceURL: "pulsar://test-broker:6650", - PulsarAuthPlugin: "org.apache.pulsar.client.impl.auth.AuthenticationToken", - PulsarAuthParams: "token:my-token", - }, - } - - // Create Package first - pkg := &fsv1alpha1.Package{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pkg-label-auto", - Namespace: "default", - }, - Spec: fsv1alpha1.PackageSpec{ - DisplayName: "Test Package", - Description: "desc", - FunctionType: fsv1alpha1.FunctionType{ - Cloud: &fsv1alpha1.CloudType{Image: "busybox:latest"}, - }, - Modules: map[string]fsv1alpha1.Module{}, - }, - } - Expect(k8sClient.Create(ctx, pkg)).To(Succeed()) - - // Create Function without package label - fn := &fsv1alpha1.Function{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-fn-label-auto", - Namespace: "default", - Labels: map[string]string{"app": "test"}, // No package label initially - }, - Spec: fsv1alpha1.FunctionSpec{ - PackageRef: fsv1alpha1.PackageRef{Name: "test-pkg-label-auto"}, - Module: "mod", - SubscriptionName: "sub", - Sink: &fsv1alpha1.SinkSpec{Pulsar: &fsv1alpha1.PulsarSinkSpec{Topic: "out"}}, - RequestSource: &fsv1alpha1.SourceSpec{Pulsar: &fsv1alpha1.PulsarSourceSpec{Topic: "in"}}, - }, - } - Expect(k8sClient.Create(ctx, fn)).To(Succeed()) - - // Reconcile the Function - _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: 
types.NamespacedName{Name: fn.Name, Namespace: fn.Namespace}, - }) - Expect(err).NotTo(HaveOccurred()) - - // Verify other labels are preserved - updatedFn := &fsv1alpha1.Function{} - Expect(k8sClient.Get(ctx, types.NamespacedName{Name: fn.Name, Namespace: fn.Namespace}, updatedFn)).To(Succeed()) - Expect(updatedFn.Labels).To(HaveKey("app")) - Expect(updatedFn.Labels["app"]).To(Equal("test")) - }) - - It("should update package label when Function package changes", func() { - By("creating Functions with different packages") - controllerReconciler := &FunctionReconciler{ - Client: k8sClient, - Scheme: k8sClient.Scheme(), - Config: Config{ - PulsarServiceURL: "pulsar://test-broker:6650", - PulsarAuthPlugin: "org.apache.pulsar.client.impl.auth.AuthenticationToken", - PulsarAuthParams: "token:my-token", - }, - } - - // Create two Packages - pkg1 := &fsv1alpha1.Package{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pkg-1", - Namespace: "default", - }, - Spec: fsv1alpha1.PackageSpec{ - DisplayName: "Test Package 1", - Description: "desc", - FunctionType: fsv1alpha1.FunctionType{ - Cloud: &fsv1alpha1.CloudType{Image: "busybox:latest"}, - }, - Modules: map[string]fsv1alpha1.Module{}, - }, - } - Expect(k8sClient.Create(ctx, pkg1)).To(Succeed()) - - pkg2 := &fsv1alpha1.Package{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pkg-2", - Namespace: "default", - }, - Spec: fsv1alpha1.PackageSpec{ - DisplayName: "Test Package 2", - Description: "desc", - FunctionType: fsv1alpha1.FunctionType{ - Cloud: &fsv1alpha1.CloudType{Image: "nginx:latest"}, - }, - Modules: map[string]fsv1alpha1.Module{}, - }, - } - Expect(k8sClient.Create(ctx, pkg2)).To(Succeed()) - - // Create Function with initial package - fn := &fsv1alpha1.Function{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-fn-package-change", - Namespace: "default", - }, - Spec: fsv1alpha1.FunctionSpec{ - PackageRef: fsv1alpha1.PackageRef{Name: "test-pkg-1"}, - Module: "mod", - SubscriptionName: "sub", - Sink: 
&fsv1alpha1.SinkSpec{Pulsar: &fsv1alpha1.PulsarSinkSpec{Topic: "out"}}, - RequestSource: &fsv1alpha1.SourceSpec{Pulsar: &fsv1alpha1.PulsarSourceSpec{Topic: "in"}}, - }, - } - Expect(k8sClient.Create(ctx, fn)).To(Succeed()) - - // Initial reconcile - _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: types.NamespacedName{Name: fn.Name, Namespace: fn.Namespace}, - }) - Expect(err).NotTo(HaveOccurred()) - - // Change the package - patch := client.MergeFrom(fn.DeepCopy()) - fn.Spec.PackageRef = fsv1alpha1.PackageRef{Name: "test-pkg-2"} - Expect(k8sClient.Patch(ctx, fn, patch)).To(Succeed()) - - // Reconcile again - _, err = controllerReconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: types.NamespacedName{Name: fn.Name, Namespace: fn.Namespace}, - }) - Expect(err).NotTo(HaveOccurred()) - - // Verify deployment was updated with new image - deployName := "function-" + fn.Name - deploy := &appsv1.Deployment{} - Expect(k8sClient.Get(ctx, types.NamespacedName{Name: deployName, Namespace: fn.Namespace}, deploy)).To(Succeed()) - Expect(deploy.Spec.Template.Spec.Containers[0].Image).To(Equal("nginx:latest")) - }) - - It("should map Package changes to related Functions", func() { - By("creating multiple Functions that reference the same Package") - controllerReconciler := &FunctionReconciler{ - Client: k8sClient, - Scheme: k8sClient.Scheme(), - Config: Config{ - PulsarServiceURL: "pulsar://test-broker:6650", - PulsarAuthPlugin: "org.apache.pulsar.client.impl.auth.AuthenticationToken", - PulsarAuthParams: "token:my-token", - }, - } - - // Create Package - pkg := &fsv1alpha1.Package{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pkg-mapping", - Namespace: "default", - }, - Spec: fsv1alpha1.PackageSpec{ - DisplayName: "Test Package", - Description: "desc", - FunctionType: fsv1alpha1.FunctionType{ - Cloud: &fsv1alpha1.CloudType{Image: "busybox:latest"}, - }, - Modules: map[string]fsv1alpha1.Module{}, - }, - } - 
Expect(k8sClient.Create(ctx, pkg)).To(Succeed()) - - // Create multiple Functions that reference the same Package - fn1 := &fsv1alpha1.Function{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-fn-mapping-1", - Namespace: "default", - Labels: map[string]string{"package": "default.test-pkg-mapping"}, - }, - Spec: fsv1alpha1.FunctionSpec{ - PackageRef: fsv1alpha1.PackageRef{Name: "test-pkg-mapping"}, - Module: "mod1", - SubscriptionName: "sub1", - Sink: &fsv1alpha1.SinkSpec{Pulsar: &fsv1alpha1.PulsarSinkSpec{Topic: "out1"}}, - RequestSource: &fsv1alpha1.SourceSpec{Pulsar: &fsv1alpha1.PulsarSourceSpec{Topic: "in1"}}, - }, - } - Expect(k8sClient.Create(ctx, fn1)).To(Succeed()) - - fn2 := &fsv1alpha1.Function{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-fn-mapping-2", - Namespace: "default", - Labels: map[string]string{"package": "default.test-pkg-mapping"}, - }, - Spec: fsv1alpha1.FunctionSpec{ - PackageRef: fsv1alpha1.PackageRef{Name: "test-pkg-mapping"}, - Module: "mod2", - SubscriptionName: "sub2", - Sink: &fsv1alpha1.SinkSpec{Pulsar: &fsv1alpha1.PulsarSinkSpec{Topic: "out2"}}, - RequestSource: &fsv1alpha1.SourceSpec{Pulsar: &fsv1alpha1.PulsarSourceSpec{Topic: "in2"}}, - }, - } - Expect(k8sClient.Create(ctx, fn2)).To(Succeed()) - - // Create a Function that references a different Package - fn3 := &fsv1alpha1.Function{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-fn-mapping-3", - Namespace: "default", - Labels: map[string]string{"package": "default.different-pkg"}, - }, - Spec: fsv1alpha1.FunctionSpec{ - PackageRef: fsv1alpha1.PackageRef{Name: "different-pkg"}, - Module: "mod3", - SubscriptionName: "sub3", - Sink: &fsv1alpha1.SinkSpec{Pulsar: &fsv1alpha1.PulsarSinkSpec{Topic: "out3"}}, - RequestSource: &fsv1alpha1.SourceSpec{Pulsar: &fsv1alpha1.PulsarSourceSpec{Topic: "in3"}}, - }, - } - Expect(k8sClient.Create(ctx, fn3)).To(Succeed()) - - // Initial reconcile - _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: 
types.NamespacedName{Name: fn1.Name, Namespace: fn1.Namespace}, - }) - Expect(err).NotTo(HaveOccurred()) - - _, err = controllerReconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: types.NamespacedName{Name: fn2.Name, Namespace: fn2.Namespace}, - }) - Expect(err).NotTo(HaveOccurred()) - - // Test the mapPackageToFunctions function - requests := controllerReconciler.mapPackageToFunctions(ctx, pkg) - Expect(requests).To(HaveLen(2)) - - // Verify the requests contain the correct Functions - requestNames := make(map[string]bool) - for _, req := range requests { - requestNames[req.NamespacedName.Name] = true - } - Expect(requestNames).To(HaveKey("test-fn-mapping-1")) - Expect(requestNames).To(HaveKey("test-fn-mapping-2")) - Expect(requestNames).NotTo(HaveKey("test-fn-mapping-3")) // Should not be included - - // Test that updating the Package triggers reconciliation of related Functions - // Update the Package image - patch := client.MergeFrom(pkg.DeepCopy()) - pkg.Spec.FunctionType.Cloud.Image = "nginx:latest" - Expect(k8sClient.Patch(ctx, pkg, patch)).To(Succeed()) - - // Simulate the Package update by calling mapPackageToFunctions - requests = controllerReconciler.mapPackageToFunctions(ctx, pkg) - Expect(requests).To(HaveLen(2)) - - // Manually reconcile the Functions to simulate the triggered reconciliation - for _, req := range requests { - _, err := controllerReconciler.Reconcile(ctx, req) - Expect(err).NotTo(HaveOccurred()) - } - - // Verify that the Deployments were updated with the new image - deploy1 := &appsv1.Deployment{} - Expect(k8sClient.Get(ctx, types.NamespacedName{Name: "function-" + fn1.Name, Namespace: fn1.Namespace}, deploy1)).To(Succeed()) - Expect(deploy1.Spec.Template.Spec.Containers[0].Image).To(Equal("nginx:latest")) - - deploy2 := &appsv1.Deployment{} - Expect(k8sClient.Get(ctx, types.NamespacedName{Name: "function-" + fn2.Name, Namespace: fn2.Namespace}, deploy2)).To(Succeed()) - 
Expect(deploy2.Spec.Template.Spec.Containers[0].Image).To(Equal("nginx:latest")) - }) - - It("should use the specified replicas from FunctionSpec", func() { - By("creating a Function with custom replicas") - controllerReconciler := &FunctionReconciler{ - Client: k8sClient, - Scheme: k8sClient.Scheme(), - Config: Config{ - PulsarServiceURL: "pulsar://test-broker:6650", - PulsarAuthPlugin: "org.apache.pulsar.client.impl.auth.AuthenticationToken", - PulsarAuthParams: "token:my-token", - }, - } - - // Create a Package resource first - pkg := &fsv1alpha1.Package{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pkg-replicas", - Namespace: "default", - }, - Spec: fsv1alpha1.PackageSpec{ - DisplayName: "Test Package Replicas", - Description: "desc", - FunctionType: fsv1alpha1.FunctionType{ - Cloud: &fsv1alpha1.CloudType{Image: "busybox:latest"}, - }, - Modules: map[string]fsv1alpha1.Module{}, - }, - } - Expect(k8sClient.Create(ctx, pkg)).To(Succeed()) - - // Create a Function with custom replicas - customReplicas := int32(3) - fn := &fsv1alpha1.Function{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-fn-replicas", - Namespace: "default", - }, - Spec: fsv1alpha1.FunctionSpec{ - PackageRef: fsv1alpha1.PackageRef{Name: "test-pkg-replicas"}, - Module: "mod", - Replicas: &customReplicas, - SubscriptionName: "sub", - Sink: &fsv1alpha1.SinkSpec{Pulsar: &fsv1alpha1.PulsarSinkSpec{Topic: "out"}}, - RequestSource: &fsv1alpha1.SourceSpec{Pulsar: &fsv1alpha1.PulsarSourceSpec{Topic: "in"}}, - }, - } - Expect(k8sClient.Create(ctx, fn)).To(Succeed()) - - // Reconcile the Function - _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: types.NamespacedName{Name: fn.Name, Namespace: fn.Namespace}, - }) - Expect(err).NotTo(HaveOccurred()) - - // Check that the Deployment has the correct number of replicas - deployName := "function-" + fn.Name - deploy := &appsv1.Deployment{} - Expect(k8sClient.Get(ctx, types.NamespacedName{Name: deployName, Namespace: 
fn.Namespace}, deploy)).To(Succeed()) - Expect(deploy.Spec.Replicas).NotTo(BeNil()) - Expect(*deploy.Spec.Replicas).To(Equal(int32(3))) - - // Test updating replicas - newReplicas := int32(5) - patch := client.MergeFrom(fn.DeepCopy()) - fn.Spec.Replicas = &newReplicas - Expect(k8sClient.Patch(ctx, fn, patch)).To(Succeed()) - - // Reconcile again - _, err = controllerReconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: types.NamespacedName{Name: fn.Name, Namespace: fn.Namespace}, - }) - Expect(err).NotTo(HaveOccurred()) - - // Verify the deployment was updated with new replicas - Expect(k8sClient.Get(ctx, types.NamespacedName{Name: deployName, Namespace: fn.Namespace}, deploy)).To(Succeed()) - Expect(*deploy.Spec.Replicas).To(Equal(int32(5))) - - // Test default replicas when not specified - fnDefault := &fsv1alpha1.Function{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-fn-default-replicas", - Namespace: "default", - }, - Spec: fsv1alpha1.FunctionSpec{ - PackageRef: fsv1alpha1.PackageRef{Name: "test-pkg-replicas"}, - Module: "mod", - SubscriptionName: "sub-default", - Sink: &fsv1alpha1.SinkSpec{Pulsar: &fsv1alpha1.PulsarSinkSpec{Topic: "out-default"}}, - RequestSource: &fsv1alpha1.SourceSpec{Pulsar: &fsv1alpha1.PulsarSourceSpec{Topic: "in-default"}}, - }, - } - Expect(k8sClient.Create(ctx, fnDefault)).To(Succeed()) - - // Reconcile the Function with default replicas - _, err = controllerReconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: types.NamespacedName{Name: fnDefault.Name, Namespace: fnDefault.Namespace}, - }) - Expect(err).NotTo(HaveOccurred()) - - // Check that the Deployment has default replicas (1) - deployDefaultName := "function-" + fnDefault.Name - deployDefault := &appsv1.Deployment{} - Expect(k8sClient.Get(ctx, types.NamespacedName{Name: deployDefaultName, Namespace: fnDefault.Namespace}, deployDefault)).To(Succeed()) - Expect(deployDefault.Spec.Replicas).NotTo(BeNil()) - Expect(*deployDefault.Spec.Replicas).To(Equal(int32(1))) 
- }) - - It("should handle Package updates in different namespaces", func() { - By("creating Functions and Packages in different namespaces") - controllerReconciler := &FunctionReconciler{ - Client: k8sClient, - Scheme: k8sClient.Scheme(), - Config: Config{ - PulsarServiceURL: "pulsar://test-broker:6650", - PulsarAuthPlugin: "org.apache.pulsar.client.impl.auth.AuthenticationToken", - PulsarAuthParams: "token:my-token", - }, - } - - // Create namespaces - ns1 := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "namespace1", - }, - } - Expect(k8sClient.Create(ctx, ns1)).To(Succeed()) - - ns2 := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "namespace2", - }, - } - Expect(k8sClient.Create(ctx, ns2)).To(Succeed()) - - // Create Package in namespace1 - pkg1 := &fsv1alpha1.Package{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pkg-ns1", - Namespace: "namespace1", - }, - Spec: fsv1alpha1.PackageSpec{ - DisplayName: "Test Package NS1", - Description: "desc", - FunctionType: fsv1alpha1.FunctionType{ - Cloud: &fsv1alpha1.CloudType{Image: "busybox:latest"}, - }, - Modules: map[string]fsv1alpha1.Module{}, - }, - } - Expect(k8sClient.Create(ctx, pkg1)).To(Succeed()) - - // Create Package in namespace2 - pkg2 := &fsv1alpha1.Package{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pkg-ns2", - Namespace: "namespace2", - }, - Spec: fsv1alpha1.PackageSpec{ - DisplayName: "Test Package NS2", - Description: "desc", - FunctionType: fsv1alpha1.FunctionType{ - Cloud: &fsv1alpha1.CloudType{Image: "nginx:latest"}, - }, - Modules: map[string]fsv1alpha1.Module{}, - }, - } - Expect(k8sClient.Create(ctx, pkg2)).To(Succeed()) - - // Create Function in namespace1 - fn1 := &fsv1alpha1.Function{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-fn-ns1", - Namespace: "namespace1", - Labels: map[string]string{"package": "namespace1.test-pkg-ns1"}, - }, - Spec: fsv1alpha1.FunctionSpec{ - PackageRef: fsv1alpha1.PackageRef{Name: "test-pkg-ns1"}, - Module: "mod1", - 
SubscriptionName: "sub1", - Sink: &fsv1alpha1.SinkSpec{Pulsar: &fsv1alpha1.PulsarSinkSpec{Topic: "out1"}}, - RequestSource: &fsv1alpha1.SourceSpec{Pulsar: &fsv1alpha1.PulsarSourceSpec{Topic: "in1"}}, - }, - } - Expect(k8sClient.Create(ctx, fn1)).To(Succeed()) - - // Create Function in namespace2 - fn2 := &fsv1alpha1.Function{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-fn-ns2", - Namespace: "namespace2", - Labels: map[string]string{"package": "namespace2.test-pkg-ns2"}, - }, - Spec: fsv1alpha1.FunctionSpec{ - PackageRef: fsv1alpha1.PackageRef{Name: "test-pkg-ns2"}, - Module: "mod2", - SubscriptionName: "sub2", - Sink: &fsv1alpha1.SinkSpec{Pulsar: &fsv1alpha1.PulsarSinkSpec{Topic: "out2"}}, - RequestSource: &fsv1alpha1.SourceSpec{Pulsar: &fsv1alpha1.PulsarSourceSpec{Topic: "in2"}}, - }, - } - Expect(k8sClient.Create(ctx, fn2)).To(Succeed()) - - // Initial reconcile - _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: types.NamespacedName{Name: fn1.Name, Namespace: fn1.Namespace}, - }) - Expect(err).NotTo(HaveOccurred()) - - _, err = controllerReconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: types.NamespacedName{Name: fn2.Name, Namespace: fn2.Namespace}, - }) - Expect(err).NotTo(HaveOccurred()) - - // Test that Package updates only affect Functions in the same namespace - requests1 := controllerReconciler.mapPackageToFunctions(ctx, pkg1) - Expect(requests1).To(HaveLen(1)) - Expect(requests1[0].NamespacedName.Name).To(Equal("test-fn-ns1")) - Expect(requests1[0].NamespacedName.Namespace).To(Equal("namespace1")) - - requests2 := controllerReconciler.mapPackageToFunctions(ctx, pkg2) - Expect(requests2).To(HaveLen(1)) - Expect(requests2[0].NamespacedName.Name).To(Equal("test-fn-ns2")) - Expect(requests2[0].NamespacedName.Namespace).To(Equal("namespace2")) - }) - }) -}) diff --git a/operator/internal/controller/packages_controller.go b/operator/internal/controller/packages_controller.go deleted file mode 100644 index 
fa114c46..00000000 --- a/operator/internal/controller/packages_controller.go +++ /dev/null @@ -1,63 +0,0 @@ -/* -Copyright 2025. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "context" - - "k8s.io/apimachinery/pkg/runtime" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - logf "sigs.k8s.io/controller-runtime/pkg/log" - - fsv1alpha1 "github.com/FunctionStream/function-stream/operator/api/v1alpha1" -) - -// PackagesReconciler reconciles a Package object -type PackagesReconciler struct { - client.Client - Scheme *runtime.Scheme -} - -// +kubebuilder:rbac:groups=fs.functionstream.github.io,resources=package,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=fs.functionstream.github.io,resources=package/status,verbs=get;update;patch -// +kubebuilder:rbac:groups=fs.functionstream.github.io,resources=package/finalizers,verbs=update - -// Reconcile is part of the main kubernetes reconciliation loop which aims to -// move the current state of the cluster closer to the desired state. -// TODO(user): Modify the Reconcile function to compare the state specified by -// the Package object against the actual cluster state, and then -// perform operations to make the cluster state reflect the state specified by -// the user. 
-// -// For more details, check Reconcile and its Result here: -// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.20.4/pkg/reconcile -func (r *PackagesReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - _ = logf.FromContext(ctx) - - // TODO(user): your logic here - - return ctrl.Result{}, nil -} - -// SetupWithManager sets up the controller with the Manager. -func (r *PackagesReconciler) SetupWithManager(mgr ctrl.Manager) error { - return ctrl.NewControllerManagedBy(mgr). - For(&fsv1alpha1.Package{}). - Named("packages"). - Complete(r) -} diff --git a/operator/internal/controller/packages_controller_test.go b/operator/internal/controller/packages_controller_test.go deleted file mode 100644 index a9890e92..00000000 --- a/operator/internal/controller/packages_controller_test.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright 2025. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "context" - - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - fsv1alpha1 "github.com/FunctionStream/function-stream/operator/api/v1alpha1" -) - -var _ = Describe("Package Controller", func() { - Context("When reconciling a resource", func() { - const resourceName = "test-resource" - - ctx := context.Background() - - typeNamespacedName := types.NamespacedName{ - Name: resourceName, - Namespace: "default", // TODO(user):Modify as needed - } - packages := &fsv1alpha1.Package{} - - BeforeEach(func() { - By("creating the custom resource for the Kind Package") - err := k8sClient.Get(ctx, typeNamespacedName, packages) - if err != nil && errors.IsNotFound(err) { - resource := &fsv1alpha1.Package{ - ObjectMeta: metav1.ObjectMeta{ - Name: typeNamespacedName.Name, - Namespace: typeNamespacedName.Namespace, - }, - Spec: fsv1alpha1.PackageSpec{ - DisplayName: "test", - Description: "desc", - FunctionType: fsv1alpha1.FunctionType{}, - Modules: map[string]fsv1alpha1.Module{"mod": {DisplayName: "mod", Description: "desc"}}, - }, - } - Expect(k8sClient.Create(ctx, resource)).To(Succeed()) - } - }) - - AfterEach(func() { - // TODO(user): Cleanup logic after each test, like removing the resource instance. 
- resource := &fsv1alpha1.Package{} - err := k8sClient.Get(ctx, typeNamespacedName, resource) - Expect(err).NotTo(HaveOccurred()) - - By("Cleanup the specific resource instance Package") - Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) - }) - It("should successfully reconcile the resource", func() { - By("Reconciling the created resource") - controllerReconciler := &PackagesReconciler{ - Client: k8sClient, - Scheme: k8sClient.Scheme(), - } - - _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: typeNamespacedName, - }) - Expect(err).NotTo(HaveOccurred()) - // TODO(user): Add more specific assertions depending on your controller's reconciliation logic. - // Example: If you expect a certain status condition after reconciliation, verify it here. - }) - }) -}) diff --git a/operator/internal/controller/suite_test.go b/operator/internal/controller/suite_test.go deleted file mode 100644 index 21306439..00000000 --- a/operator/internal/controller/suite_test.go +++ /dev/null @@ -1,116 +0,0 @@ -/* -Copyright 2025. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "context" - "os" - "path/filepath" - "testing" - - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" - - "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/rest" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/envtest" - logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/log/zap" - - fsv1alpha1 "github.com/FunctionStream/function-stream/operator/api/v1alpha1" - // +kubebuilder:scaffold:imports -) - -// These tests use Ginkgo (BDD-style Go testing framework). Refer to -// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. - -var ( - ctx context.Context - cancel context.CancelFunc - testEnv *envtest.Environment - cfg *rest.Config - k8sClient client.Client -) - -func TestControllers(t *testing.T) { - RegisterFailHandler(Fail) - - RunSpecs(t, "Controller Suite") -} - -var _ = BeforeSuite(func() { - logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) - - ctx, cancel = context.WithCancel(context.TODO()) - - var err error - err = fsv1alpha1.AddToScheme(scheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - - // +kubebuilder:scaffold:scheme - - By("bootstrapping test environment") - testEnv = &envtest.Environment{ - CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")}, - ErrorIfCRDPathMissing: true, - } - - // Retrieve the first found binary directory to allow running tests from IDEs - if getFirstFoundEnvTestBinaryDir() != "" { - testEnv.BinaryAssetsDirectory = getFirstFoundEnvTestBinaryDir() - } - - // cfg is defined in this file globally. - cfg, err = testEnv.Start() - Expect(err).NotTo(HaveOccurred()) - Expect(cfg).NotTo(BeNil()) - - k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) - Expect(err).NotTo(HaveOccurred()) - Expect(k8sClient).NotTo(BeNil()) -}) - -var _ = AfterSuite(func() { - By("tearing down the test environment") - cancel() - err := testEnv.Stop() - Expect(err).NotTo(HaveOccurred()) -}) - -// getFirstFoundEnvTestBinaryDir locates the first binary in the specified path. 
-// ENVTEST-based tests depend on specific binaries, usually located in paths set by -// controller-runtime. When running tests directly (e.g., via an IDE) without using -// Makefile targets, the 'BinaryAssetsDirectory' must be explicitly configured. -// -// This function streamlines the process by finding the required binaries, similar to -// setting the 'KUBEBUILDER_ASSETS' environment variable. To ensure the binaries are -// properly set up, run 'make setup-envtest' beforehand. -func getFirstFoundEnvTestBinaryDir() string { - basePath := filepath.Join("..", "..", "bin", "k8s") - entries, err := os.ReadDir(basePath) - if err != nil { - logf.Log.Error(err, "Failed to read directory", "path", basePath) - return "" - } - for _, entry := range entries { - if entry.IsDir() { - return filepath.Join(basePath, entry.Name()) - } - } - return "" -} diff --git a/operator/internal/webhook/v1alpha1/function_webhook.go b/operator/internal/webhook/v1alpha1/function_webhook.go deleted file mode 100644 index f624336b..00000000 --- a/operator/internal/webhook/v1alpha1/function_webhook.go +++ /dev/null @@ -1,162 +0,0 @@ -/* -Copyright 2025. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1alpha1 - -import ( - "context" - "fmt" - - "k8s.io/apimachinery/pkg/runtime" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/webhook" - "sigs.k8s.io/controller-runtime/pkg/webhook/admission" - - fsv1alpha1 "github.com/FunctionStream/function-stream/operator/api/v1alpha1" -) - -// nolint:unused -// log is for logging in this package. -var functionlog = logf.Log.WithName("function-resource") - -// SetupFunctionWebhookWithManager registers the webhook for Function in the manager. -func SetupFunctionWebhookWithManager(mgr ctrl.Manager) error { - return ctrl.NewWebhookManagedBy(mgr).For(&fsv1alpha1.Function{}). - WithValidator(&FunctionCustomValidator{Client: mgr.GetClient()}). - WithDefaulter(&FunctionCustomDefaulter{}). - Complete() -} - -// TODO(user): EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! - -// +kubebuilder:webhook:path=/mutate-fs-functionstream-github-io-v1alpha1-function,mutating=true,failurePolicy=fail,sideEffects=None,groups=fs.functionstream.github.io,resources=functions,verbs=create;update,versions=v1alpha1,name=mfunction-v1alpha1.kb.io,admissionReviewVersions=v1 -// +kubebuilder:rbac:groups=fs.functionstream.github.io,resources=packages,verbs=get;list;watch -// +kubebuilder:rbac:groups=fs.functionstream.github.io,resources=functions,verbs=get;list;watch - -// FunctionCustomDefaulter struct is responsible for setting default values on the custom resource of the -// Kind Function when those are created or updated. -// -// NOTE: The +kubebuilder:object:generate=false marker prevents controller-gen from generating DeepCopy methods, -// as it is used only for temporary operations and does not need to be deeply copied. 
-type FunctionCustomDefaulter struct { - // TODO(user): Add more fields as needed for defaulting -} - -var _ webhook.CustomDefaulter = &FunctionCustomDefaulter{} - -// Default implements webhook.CustomDefaulter so a webhook will be registered for the Kind Function. -func (d *FunctionCustomDefaulter) Default(ctx context.Context, obj runtime.Object) error { - function, ok := obj.(*fsv1alpha1.Function) - - if !ok { - return fmt.Errorf("expected an Function object but got %T", obj) - } - functionlog.Info("Defaulting for Function", "name", function.GetName()) - - // Ensure the 'package' label is always set to the current Spec.PackageRef value - if function.Labels == nil { - function.Labels = make(map[string]string) - } - packageNamespace := function.Spec.PackageRef.Namespace - if packageNamespace == "" { - packageNamespace = function.Namespace - } - packageLabel := fmt.Sprintf("%s.%s", packageNamespace, function.Spec.PackageRef.Name) - function.Labels["package"] = packageLabel - - return nil -} - -// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. -// NOTE: The 'path' attribute must follow a specific pattern and should not be modified directly here. -// Modifying the path for an invalid path can cause API server errors; failing to locate the webhook. -// +kubebuilder:webhook:path=/validate-fs-functionstream-github-io-v1alpha1-function,mutating=false,failurePolicy=fail,sideEffects=None,groups=fs.functionstream.github.io,resources=functions,verbs=create;update;delete,versions=v1alpha1,name=vfunction-v1alpha1.kb.io,admissionReviewVersions=v1 - -// FunctionCustomValidator struct is responsible for validating the Function resource -// when it is created, updated, or deleted. -// -// NOTE: The +kubebuilder:object:generate=false marker prevents controller-gen from generating DeepCopy methods, -// as this struct is used only for temporary operations and does not need to be deeply copied. 
-type FunctionCustomValidator struct { - Client client.Client - // TODO(user): Add more fields as needed for validation -} - -var _ webhook.CustomValidator = &FunctionCustomValidator{} - -// validateReferences checks that all referenced resources in the Function exist. -func (v *FunctionCustomValidator) validateReferences(ctx context.Context, function *fsv1alpha1.Function) error { - // Check if the referenced package exists - var pkg fsv1alpha1.Package - packageNamespace := function.Spec.PackageRef.Namespace - if packageNamespace == "" { - packageNamespace = function.Namespace - } - err := v.Client.Get(ctx, client.ObjectKey{ - Namespace: packageNamespace, - Name: function.Spec.PackageRef.Name, - }, &pkg) - if err != nil { - return fmt.Errorf("referenced package '%s' not found in namespace '%s': %w", function.Spec.PackageRef.Name, packageNamespace, err) - } - // Add more reference checks here in the future as needed - return nil -} - -// ValidateCreate implements webhook.CustomValidator so a webhook will be registered for the type Function. -func (v *FunctionCustomValidator) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { - function, ok := obj.(*fsv1alpha1.Function) - if !ok { - return nil, fmt.Errorf("expected a Function object but got %T", obj) - } - functionlog.Info("Validation for Function upon creation", "name", function.GetName()) - - if err := v.validateReferences(ctx, function); err != nil { - return nil, err - } - - return nil, nil -} - -// ValidateUpdate implements webhook.CustomValidator so a webhook will be registered for the type Function. 
-func (v *FunctionCustomValidator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) { - function, ok := newObj.(*fsv1alpha1.Function) - if !ok { - return nil, fmt.Errorf("expected a Function object for the newObj but got %T", newObj) - } - functionlog.Info("Validation for Function upon update", "name", function.GetName()) - - if err := v.validateReferences(ctx, function); err != nil { - return nil, err - } - - return nil, nil -} - -// ValidateDelete implements webhook.CustomValidator so a webhook will be registered for the type Function. -func (v *FunctionCustomValidator) ValidateDelete(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { - function, ok := obj.(*fsv1alpha1.Function) - if !ok { - return nil, fmt.Errorf("expected a Function object but got %T", obj) - } - functionlog.Info("Validation for Function upon deletion", "name", function.GetName()) - - // TODO(user): fill in your validation logic upon object deletion. - - return nil, nil -} diff --git a/operator/internal/webhook/v1alpha1/function_webhook_test.go b/operator/internal/webhook/v1alpha1/function_webhook_test.go deleted file mode 100644 index 5406092a..00000000 --- a/operator/internal/webhook/v1alpha1/function_webhook_test.go +++ /dev/null @@ -1,540 +0,0 @@ -/* -Copyright 2025. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "context" - - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" - - fsv1alpha1 "github.com/FunctionStream/function-stream/operator/api/v1alpha1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - // TODO (user): Add any additional imports if needed -) - -const ( - defaultNamespace = "default" - existingPkgName = "existing-pkg" - testDisplayName = "test" - testDescription = "desc" -) - -var _ = Describe("Function Webhook", func() { - var ( - obj *fsv1alpha1.Function - oldObj *fsv1alpha1.Function - validator FunctionCustomValidator - defaulter FunctionCustomDefaulter - ctx context.Context - ) - - BeforeEach(func() { - ctx = context.Background() - obj = &fsv1alpha1.Function{ - ObjectMeta: fsv1alpha1.Function{}.ObjectMeta, - Spec: fsv1alpha1.FunctionSpec{ - DisplayName: "test-function", - Description: "desc", - PackageRef: fsv1alpha1.PackageRef{Name: "test-pkg"}, - Module: "test-module", - }, - } - oldObj = &fsv1alpha1.Function{ - ObjectMeta: fsv1alpha1.Function{}.ObjectMeta, - Spec: fsv1alpha1.FunctionSpec{ - DisplayName: "old-function", - Description: "desc", - PackageRef: fsv1alpha1.PackageRef{Name: "test-pkg"}, - Module: "test-module", - }, - } - validator = FunctionCustomValidator{Client: k8sClient} - Expect(validator).NotTo(BeNil(), "Expected validator to be initialized") - defaulter = FunctionCustomDefaulter{} - Expect(defaulter).NotTo(BeNil(), "Expected defaulter to be initialized") - Expect(oldObj).NotTo(BeNil(), "Expected oldObj to be initialized") - Expect(obj).NotTo(BeNil(), "Expected obj to be initialized") - // TODO (user): Add any setup logic common to all tests - }) - - AfterEach(func() { - // Clean up the test package if it exists - _ = k8sClient.Delete(ctx, &fsv1alpha1.Package{ - ObjectMeta: fsv1alpha1.Package{}.ObjectMeta, - // Namespace and Name will be set in the test - }) - // TODO (user): Add any teardown logic common to all tests - }) - - Context("Reference validation", func() { - It("should deny creation if the 
referenced package does not exist", func() { - obj.Namespace = defaultNamespace - obj.Spec.PackageRef.Name = "nonexistent-pkg" - _, err := validator.ValidateCreate(ctx, obj) - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("referenced package")) - }) - - It("should allow creation if the referenced package exists", func() { - obj.Namespace = defaultNamespace - obj.Spec.PackageRef.Name = existingPkgName - // Create the referenced package with required fields - pkg := &fsv1alpha1.Package{} - pkg.Name = existingPkgName - pkg.Namespace = defaultNamespace - pkg.Spec.DisplayName = testDisplayName - pkg.Spec.Description = testDescription - pkg.Spec.FunctionType = fsv1alpha1.FunctionType{} - pkg.Spec.Modules = map[string]fsv1alpha1.Module{"test-module": {DisplayName: "mod", Description: "desc"}} - Expect(k8sClient.Create(ctx, pkg)).To(Succeed()) - DeferCleanup(func() { _ = k8sClient.Delete(ctx, pkg) }) - _, err := validator.ValidateCreate(ctx, obj) - Expect(err).ToNot(HaveOccurred()) - }) - - It("should deny update if the referenced package does not exist", func() { - obj.Namespace = defaultNamespace - obj.Spec.PackageRef.Name = "nonexistent-pkg" - _, err := validator.ValidateUpdate(ctx, oldObj, obj) - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("referenced package")) - }) - - It("should allow update if the referenced package exists", func() { - obj.Namespace = defaultNamespace - obj.Spec.PackageRef.Name = existingPkgName - // Create the referenced package with required fields - pkg := &fsv1alpha1.Package{} - pkg.Name = existingPkgName - pkg.Namespace = defaultNamespace - pkg.Spec.DisplayName = testDisplayName - pkg.Spec.Description = testDescription - pkg.Spec.FunctionType = fsv1alpha1.FunctionType{} - pkg.Spec.Modules = map[string]fsv1alpha1.Module{"test-module": {DisplayName: "mod", Description: "desc"}} - Expect(k8sClient.Create(ctx, pkg)).To(Succeed()) - DeferCleanup(func() { _ = k8sClient.Delete(ctx, pkg) }) - _, 
err := validator.ValidateUpdate(ctx, oldObj, obj) - Expect(err).ToNot(HaveOccurred()) - }) - }) - - Context("Defaulter logic for package label", func() { - It("should set the 'package' label to the value of spec.package on creation", func() { - obj.Namespace = defaultNamespace - obj.Spec.PackageRef.Name = "pkg-on-create" - - // Create the referenced package - pkg := &fsv1alpha1.Package{} - pkg.Name = "pkg-on-create" - pkg.Namespace = defaultNamespace - pkg.Spec.DisplayName = testDisplayName - pkg.Spec.Description = testDescription - pkg.Spec.FunctionType = fsv1alpha1.FunctionType{} - pkg.Spec.Modules = map[string]fsv1alpha1.Module{"test-module": {DisplayName: "mod", Description: "desc"}} - Expect(k8sClient.Create(ctx, pkg)).To(Succeed()) - DeferCleanup(func() { _ = k8sClient.Delete(ctx, pkg) }) - - // Call the defaulter - obj.Labels = nil // simulate no labels set - Expect(defaulter.Default(ctx, obj)).To(Succeed()) - Expect(obj.Labels).To(HaveKeyWithValue("package", "default.pkg-on-create")) - }) - - It("should update the 'package' label to the new spec.package value on update", func() { - obj.Namespace = defaultNamespace - obj.Spec.PackageRef.Name = "pkg-on-update" - - // Create the referenced package - pkg := &fsv1alpha1.Package{} - pkg.Name = "pkg-on-update" - pkg.Namespace = defaultNamespace - pkg.Spec.DisplayName = testDisplayName - pkg.Spec.Description = testDescription - pkg.Spec.FunctionType = fsv1alpha1.FunctionType{} - pkg.Spec.Modules = map[string]fsv1alpha1.Module{"test-module": {DisplayName: "mod", Description: "desc"}} - Expect(k8sClient.Create(ctx, pkg)).To(Succeed()) - DeferCleanup(func() { _ = k8sClient.Delete(ctx, pkg) }) - - // Simulate an existing label with an old value - obj.Labels = map[string]string{"package": "old-pkg"} - Expect(defaulter.Default(ctx, obj)).To(Succeed()) - Expect(obj.Labels).To(HaveKeyWithValue("package", "default.pkg-on-update")) - }) - }) - - Context("Cross-namespace package references", func() { - It("should allow 
creation if the referenced package exists in a different namespace", func() { - obj.Namespace = defaultNamespace - obj.Spec.PackageRef = fsv1alpha1.PackageRef{Name: "cross-pkg-1", Namespace: "other-ns-1"} - - // Create the namespace first - ns := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "other-ns-1", - }, - } - Expect(k8sClient.Create(ctx, ns)).To(Succeed()) - DeferCleanup(func() { _ = k8sClient.Delete(ctx, ns) }) - - // Create the referenced package in a different namespace - pkg := &fsv1alpha1.Package{} - pkg.Name = "cross-pkg-1" - pkg.Namespace = "other-ns-1" - pkg.Spec.DisplayName = testDisplayName - pkg.Spec.Description = testDescription - pkg.Spec.FunctionType = fsv1alpha1.FunctionType{} - pkg.Spec.Modules = map[string]fsv1alpha1.Module{"test-module": {DisplayName: "mod", Description: "desc"}} - Expect(k8sClient.Create(ctx, pkg)).To(Succeed()) - DeferCleanup(func() { _ = k8sClient.Delete(ctx, pkg) }) - - _, err := validator.ValidateCreate(ctx, obj) - Expect(err).ToNot(HaveOccurred()) - }) - - It("should deny creation if the referenced package does not exist in the specified namespace", func() { - obj.Namespace = defaultNamespace - obj.Spec.PackageRef = fsv1alpha1.PackageRef{Name: "cross-pkg-2", Namespace: "wrong-ns-2"} - - // Create the namespace first - ns := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "wrong-ns-2", - }, - } - Expect(k8sClient.Create(ctx, ns)).To(Succeed()) - DeferCleanup(func() { _ = k8sClient.Delete(ctx, ns) }) - - // Create another namespace for the package - ns2 := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "other-ns-3", - }, - } - Expect(k8sClient.Create(ctx, ns2)).To(Succeed()) - DeferCleanup(func() { _ = k8sClient.Delete(ctx, ns2) }) - - // Create a package with the same name in a different namespace - pkg := &fsv1alpha1.Package{} - pkg.Name = "cross-pkg-2" - pkg.Namespace = "other-ns-3" // different namespace - pkg.Spec.DisplayName = testDisplayName - pkg.Spec.Description = 
testDescription - pkg.Spec.FunctionType = fsv1alpha1.FunctionType{} - pkg.Spec.Modules = map[string]fsv1alpha1.Module{"test-module": {DisplayName: "mod", Description: "desc"}} - Expect(k8sClient.Create(ctx, pkg)).To(Succeed()) - DeferCleanup(func() { _ = k8sClient.Delete(ctx, pkg) }) - - _, err := validator.ValidateCreate(ctx, obj) - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("referenced package")) - }) - - It("should set the 'package' label correctly for cross-namespace references", func() { - obj.Namespace = defaultNamespace - obj.Spec.PackageRef = fsv1alpha1.PackageRef{Name: "cross-pkg-3", Namespace: "other-ns-4"} - - // Create the namespace first - ns := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "other-ns-4", - }, - } - Expect(k8sClient.Create(ctx, ns)).To(Succeed()) - DeferCleanup(func() { _ = k8sClient.Delete(ctx, ns) }) - - // Create the referenced package - pkg := &fsv1alpha1.Package{} - pkg.Name = "cross-pkg-3" - pkg.Namespace = "other-ns-4" - pkg.Spec.DisplayName = testDisplayName - pkg.Spec.Description = testDescription - pkg.Spec.FunctionType = fsv1alpha1.FunctionType{} - pkg.Spec.Modules = map[string]fsv1alpha1.Module{"test-module": {DisplayName: "mod", Description: "desc"}} - Expect(k8sClient.Create(ctx, pkg)).To(Succeed()) - DeferCleanup(func() { _ = k8sClient.Delete(ctx, pkg) }) - - // Call the defaulter - obj.Labels = nil // simulate no labels set - Expect(defaulter.Default(ctx, obj)).To(Succeed()) - Expect(obj.Labels).To(HaveKeyWithValue("package", "other-ns-4.cross-pkg-3")) - }) - - It("should set the 'package' label correctly for same-namespace references", func() { - obj.Namespace = defaultNamespace - obj.Spec.PackageRef = fsv1alpha1.PackageRef{Name: "same-pkg"} // no namespace specified - - // Create the referenced package - pkg := &fsv1alpha1.Package{} - pkg.Name = "same-pkg" - pkg.Namespace = "default" - pkg.Spec.DisplayName = "test" - pkg.Spec.Description = "desc" - pkg.Spec.FunctionType = 
fsv1alpha1.FunctionType{} - pkg.Spec.Modules = map[string]fsv1alpha1.Module{"test-module": {DisplayName: "mod", Description: "desc"}} - Expect(k8sClient.Create(ctx, pkg)).To(Succeed()) - DeferCleanup(func() { _ = k8sClient.Delete(ctx, pkg) }) - - // Call the defaulter - obj.Labels = nil // simulate no labels set - Expect(defaulter.Default(ctx, obj)).To(Succeed()) - Expect(obj.Labels).To(HaveKeyWithValue("package", "default.same-pkg")) - }) - }) - - Context("Automatic package label generation", func() { - It("should automatically generate package label when function is created without labels", func() { - obj.Namespace = defaultNamespace - obj.Spec.PackageRef = fsv1alpha1.PackageRef{Name: "auto-gen-pkg"} - - // Create the referenced package - pkg := &fsv1alpha1.Package{} - pkg.Name = "auto-gen-pkg" - pkg.Namespace = defaultNamespace - pkg.Spec.DisplayName = testDisplayName - pkg.Spec.Description = testDescription - pkg.Spec.FunctionType = fsv1alpha1.FunctionType{} - pkg.Spec.Modules = map[string]fsv1alpha1.Module{"test-module": {DisplayName: "mod", Description: "desc"}} - Expect(k8sClient.Create(ctx, pkg)).To(Succeed()) - DeferCleanup(func() { _ = k8sClient.Delete(ctx, pkg) }) - - // Function starts with no labels - obj.Labels = nil - Expect(defaulter.Default(ctx, obj)).To(Succeed()) - Expect(obj.Labels).To(HaveKeyWithValue("package", "default.auto-gen-pkg")) - }) - - It("should override user-specified package label with auto-generated one", func() { - obj.Namespace = defaultNamespace - obj.Spec.PackageRef = fsv1alpha1.PackageRef{Name: "override-pkg"} - - // Create the referenced package - pkg := &fsv1alpha1.Package{} - pkg.Name = "override-pkg" - pkg.Namespace = defaultNamespace - pkg.Spec.DisplayName = testDisplayName - pkg.Spec.Description = testDescription - pkg.Spec.FunctionType = fsv1alpha1.FunctionType{} - pkg.Spec.Modules = map[string]fsv1alpha1.Module{"test-module": {DisplayName: "mod", Description: "desc"}} - Expect(k8sClient.Create(ctx, pkg)).To(Succeed()) 
- DeferCleanup(func() { _ = k8sClient.Delete(ctx, pkg) }) - - // User tries to set their own package label (should be overridden) - obj.Labels = map[string]string{ - "package": "user-specified-package", - "app": "my-app", // This should be preserved - } - Expect(defaulter.Default(ctx, obj)).To(Succeed()) - Expect(obj.Labels).To(HaveKeyWithValue("package", "default.override-pkg")) - Expect(obj.Labels).To(HaveKeyWithValue("app", "my-app")) - }) - - It("should automatically generate package label for cross-namespace references", func() { - obj.Namespace = defaultNamespace - obj.Spec.PackageRef = fsv1alpha1.PackageRef{Name: "cross-pkg", Namespace: "other-ns"} - - // Create namespace - ns := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "other-ns", - }, - } - Expect(k8sClient.Create(ctx, ns)).To(Succeed()) - DeferCleanup(func() { _ = k8sClient.Delete(ctx, ns) }) - - // Create the referenced package in different namespace - pkg := &fsv1alpha1.Package{} - pkg.Name = "cross-pkg" - pkg.Namespace = "other-ns" - pkg.Spec.DisplayName = testDisplayName - pkg.Spec.Description = testDescription - pkg.Spec.FunctionType = fsv1alpha1.FunctionType{} - pkg.Spec.Modules = map[string]fsv1alpha1.Module{"test-module": {DisplayName: "mod", Description: "desc"}} - Expect(k8sClient.Create(ctx, pkg)).To(Succeed()) - DeferCleanup(func() { _ = k8sClient.Delete(ctx, pkg) }) - - // Function starts with no labels - obj.Labels = nil - Expect(defaulter.Default(ctx, obj)).To(Succeed()) - Expect(obj.Labels).To(HaveKeyWithValue("package", "other-ns.cross-pkg")) - }) - - It("should update auto-generated package label when package reference changes", func() { - obj.Namespace = defaultNamespace - obj.Spec.PackageRef = fsv1alpha1.PackageRef{Name: "old-pkg"} - - // Create old package - oldPkg := &fsv1alpha1.Package{} - oldPkg.Name = "old-pkg" - oldPkg.Namespace = defaultNamespace - oldPkg.Spec.DisplayName = testDisplayName - oldPkg.Spec.Description = testDescription - oldPkg.Spec.FunctionType 
= fsv1alpha1.FunctionType{} - oldPkg.Spec.Modules = map[string]fsv1alpha1.Module{"test-module": {DisplayName: "mod", Description: "desc"}} - Expect(k8sClient.Create(ctx, oldPkg)).To(Succeed()) - DeferCleanup(func() { _ = k8sClient.Delete(ctx, oldPkg) }) - - // Create new package - newPkg := &fsv1alpha1.Package{} - newPkg.Name = "new-pkg" - newPkg.Namespace = defaultNamespace - newPkg.Spec.DisplayName = testDisplayName - newPkg.Spec.Description = testDescription - newPkg.Spec.FunctionType = fsv1alpha1.FunctionType{} - newPkg.Spec.Modules = map[string]fsv1alpha1.Module{"test-module": {DisplayName: "mod", Description: "desc"}} - Expect(k8sClient.Create(ctx, newPkg)).To(Succeed()) - DeferCleanup(func() { _ = k8sClient.Delete(ctx, newPkg) }) - - // Initial auto-generated label - obj.Labels = nil - Expect(defaulter.Default(ctx, obj)).To(Succeed()) - Expect(obj.Labels).To(HaveKeyWithValue("package", "default.old-pkg")) - - // Change package reference - obj.Spec.PackageRef = fsv1alpha1.PackageRef{Name: "new-pkg"} - Expect(defaulter.Default(ctx, obj)).To(Succeed()) - Expect(obj.Labels).To(HaveKeyWithValue("package", "default.new-pkg")) - }) - - It("should preserve other labels when auto-generating package label", func() { - obj.Namespace = defaultNamespace - obj.Spec.PackageRef = fsv1alpha1.PackageRef{Name: "preserve-pkg"} - - // Create the referenced package - pkg := &fsv1alpha1.Package{} - pkg.Name = "preserve-pkg" - pkg.Namespace = defaultNamespace - pkg.Spec.DisplayName = testDisplayName - pkg.Spec.Description = testDescription - pkg.Spec.FunctionType = fsv1alpha1.FunctionType{} - pkg.Spec.Modules = map[string]fsv1alpha1.Module{"test-module": {DisplayName: "mod", Description: "desc"}} - Expect(k8sClient.Create(ctx, pkg)).To(Succeed()) - DeferCleanup(func() { _ = k8sClient.Delete(ctx, pkg) }) - - // Function has existing labels - obj.Labels = map[string]string{ - "app": "my-app", - "version": "v1.0.0", - "environment": "production", - } - Expect(defaulter.Default(ctx, 
obj)).To(Succeed()) - Expect(obj.Labels).To(HaveKeyWithValue("package", "default.preserve-pkg")) - Expect(obj.Labels).To(HaveKeyWithValue("app", "my-app")) - Expect(obj.Labels).To(HaveKeyWithValue("version", "v1.0.0")) - Expect(obj.Labels).To(HaveKeyWithValue("environment", "production")) - }) - }) - - Context("Integration tests for automatic package label generation", func() { - It("should create function with auto-generated package label through webhook", func() { - obj.Namespace = defaultNamespace - obj.Name = "auto-gen-test-fn" - obj.Spec.PackageRef = fsv1alpha1.PackageRef{Name: "auto-gen-integration-pkg"} - - // Create the referenced package - pkg := &fsv1alpha1.Package{} - pkg.Name = "auto-gen-integration-pkg" - pkg.Namespace = defaultNamespace - pkg.Spec.DisplayName = testDisplayName - pkg.Spec.Description = testDescription - pkg.Spec.FunctionType = fsv1alpha1.FunctionType{} - pkg.Spec.Modules = map[string]fsv1alpha1.Module{"test-module": {DisplayName: "mod", Description: "desc"}} - Expect(k8sClient.Create(ctx, pkg)).To(Succeed()) - DeferCleanup(func() { _ = k8sClient.Delete(ctx, pkg) }) - - // Apply defaulter to simulate webhook behavior - Expect(defaulter.Default(ctx, obj)).To(Succeed()) - - // Verify package label is auto-generated correctly - Expect(obj.Labels).To(HaveKeyWithValue("package", "default.auto-gen-integration-pkg")) - - // Simulate creating the function in the cluster - Expect(k8sClient.Create(ctx, obj)).To(Succeed()) - DeferCleanup(func() { _ = k8sClient.Delete(ctx, obj) }) - - // Verify the function was created with the auto-generated label - createdFn := &fsv1alpha1.Function{} - Expect(k8sClient.Get(ctx, types.NamespacedName{Name: obj.Name, Namespace: obj.Namespace}, createdFn)).To(Succeed()) - Expect(createdFn.Labels).To(HaveKeyWithValue("package", "default.auto-gen-integration-pkg")) - }) - - It("should handle multiple functions with auto-generated package labels", func() { - // Create the package - pkg := &fsv1alpha1.Package{} - 
pkg.Name = "multi-fn-pkg" - pkg.Namespace = defaultNamespace - pkg.Spec.DisplayName = testDisplayName - pkg.Spec.Description = testDescription - pkg.Spec.FunctionType = fsv1alpha1.FunctionType{} - pkg.Spec.Modules = map[string]fsv1alpha1.Module{"test-module": {DisplayName: "mod", Description: "desc"}} - Expect(k8sClient.Create(ctx, pkg)).To(Succeed()) - DeferCleanup(func() { _ = k8sClient.Delete(ctx, pkg) }) - - // Create first function (no labels initially) - fn1 := &fsv1alpha1.Function{ - ObjectMeta: metav1.ObjectMeta{ - Name: "multi-fn1", - Namespace: defaultNamespace, - }, - Spec: fsv1alpha1.FunctionSpec{ - DisplayName: "Function 1", - Description: "desc", - PackageRef: fsv1alpha1.PackageRef{Name: "multi-fn-pkg"}, - Module: "test-module", - }, - } - Expect(defaulter.Default(ctx, fn1)).To(Succeed()) - Expect(k8sClient.Create(ctx, fn1)).To(Succeed()) - DeferCleanup(func() { _ = k8sClient.Delete(ctx, fn1) }) - - // Create second function (with some existing labels) - fn2 := &fsv1alpha1.Function{ - ObjectMeta: metav1.ObjectMeta{ - Name: "multi-fn2", - Namespace: defaultNamespace, - Labels: map[string]string{ - "app": "my-app", - "version": "v1.0.0", - }, - }, - Spec: fsv1alpha1.FunctionSpec{ - DisplayName: "Function 2", - Description: "desc", - PackageRef: fsv1alpha1.PackageRef{Name: "multi-fn-pkg"}, - Module: "test-module", - }, - } - Expect(defaulter.Default(ctx, fn2)).To(Succeed()) - Expect(k8sClient.Create(ctx, fn2)).To(Succeed()) - DeferCleanup(func() { _ = k8sClient.Delete(ctx, fn2) }) - - // Verify both functions have the correct auto-generated package label - createdFn1 := &fsv1alpha1.Function{} - Expect(k8sClient.Get(ctx, types.NamespacedName{Name: fn1.Name, Namespace: fn1.Namespace}, createdFn1)).To(Succeed()) - Expect(createdFn1.Labels).To(HaveKeyWithValue("package", "default.multi-fn-pkg")) - - createdFn2 := &fsv1alpha1.Function{} - Expect(k8sClient.Get(ctx, types.NamespacedName{Name: fn2.Name, Namespace: fn2.Namespace}, createdFn2)).To(Succeed()) - 
Expect(createdFn2.Labels).To(HaveKeyWithValue("package", "default.multi-fn-pkg")) - Expect(createdFn2.Labels).To(HaveKeyWithValue("app", "my-app")) - Expect(createdFn2.Labels).To(HaveKeyWithValue("version", "v1.0.0")) - }) - }) - -}) diff --git a/operator/internal/webhook/v1alpha1/packages_webhook.go b/operator/internal/webhook/v1alpha1/packages_webhook.go deleted file mode 100644 index dccf70ec..00000000 --- a/operator/internal/webhook/v1alpha1/packages_webhook.go +++ /dev/null @@ -1,140 +0,0 @@ -/* -Copyright 2025. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "context" - "fmt" - - "k8s.io/apimachinery/pkg/runtime" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/webhook" - "sigs.k8s.io/controller-runtime/pkg/webhook/admission" - - fsv1alpha1 "github.com/FunctionStream/function-stream/operator/api/v1alpha1" -) - -// nolint:unused -// log is for logging in this package. -var packageslog = logf.Log.WithName("package-resource") - -// SetupPackagesWebhookWithManager registers the webhook for Packages in the manager. -func SetupPackagesWebhookWithManager(mgr ctrl.Manager) error { - return ctrl.NewWebhookManagedBy(mgr).For(&fsv1alpha1.Package{}). - WithValidator(&PackagesCustomValidator{Client: mgr.GetClient()}). - WithDefaulter(&PackagesCustomDefaulter{}). - Complete() -} - -// TODO(user): EDIT THIS FILE! 
THIS IS SCAFFOLDING FOR YOU TO OWN! - -// +kubebuilder:webhook:path=/mutate-fs-functionstream-github-io-v1alpha1-package,mutating=true,failurePolicy=fail,sideEffects=None,groups=fs.functionstream.github.io,resources=packages,verbs=create;update,versions=v1alpha1,name=mpackage-v1alpha1.kb.io,admissionReviewVersions=v1 - -// PackagesCustomDefaulter struct is responsible for setting default values on the custom resource of the -// Kind Packages when those are created or updated. -// -// NOTE: The +kubebuilder:object:generate=false marker prevents controller-gen from generating DeepCopy methods, -// as it is used only for temporary operations and does not need to be deeply copied. -type PackagesCustomDefaulter struct { - // TODO(user): Add more fields as needed for defaulting -} - -var _ webhook.CustomDefaulter = &PackagesCustomDefaulter{} - -// Default implements webhook.CustomDefaulter so a webhook will be registered for the Kind Packages. -func (d *PackagesCustomDefaulter) Default(ctx context.Context, obj runtime.Object) error { - packages, ok := obj.(*fsv1alpha1.Package) - - if !ok { - return fmt.Errorf("expected an Packages object but got %T", obj) - } - packageslog.Info("Defaulting for Packages", "name", packages.GetName()) - - // TODO(user): fill in your defaulting logic. - - return nil -} - -// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. -// NOTE: The 'path' attribute must follow a specific pattern and should not be modified directly here. -// Modifying the path for an invalid path can cause API server errors; failing to locate the webhook. 
-// +kubebuilder:webhook:path=/validate-fs-functionstream-github-io-v1alpha1-package,mutating=false,failurePolicy=fail,sideEffects=None,groups=fs.functionstream.github.io,resources=packages,verbs=create;update;delete,versions=v1alpha1,name=vpackage-v1alpha1.kb.io,admissionReviewVersions=v1 - -// PackagesCustomValidator struct is responsible for validating the Packages resource -// when it is created, updated, or deleted. -// -// NOTE: The +kubebuilder:object:generate=false marker prevents controller-gen from generating DeepCopy methods, -// as this struct is used only for temporary operations and does not need to be deeply copied. -type PackagesCustomValidator struct { - Client client.Client - // TODO(user): Add more fields as needed for validation -} - -var _ webhook.CustomValidator = &PackagesCustomValidator{} - -// ValidateCreate implements webhook.CustomValidator so a webhook will be registered for the type Packages. -func (v *PackagesCustomValidator) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { - packages, ok := obj.(*fsv1alpha1.Package) - if !ok { - return nil, fmt.Errorf("expected a Packages object but got %T", obj) - } - packageslog.Info("Validation for Packages upon creation", "name", packages.GetName()) - - // TODO(user): fill in your validation logic upon object creation. 
- - return nil, nil -} - -func (v *PackagesCustomValidator) referencingFunctions(ctx context.Context, namespace, packageName string) ([]string, error) { - var functionList fsv1alpha1.FunctionList - err := v.Client.List(ctx, &functionList, client.InNamespace(namespace)) - if err != nil { - return nil, fmt.Errorf("failed to list Functions in namespace '%s': %w", namespace, err) - } - var referencing []string - packageLabel := fmt.Sprintf("%s.%s", namespace, packageName) - for _, fn := range functionList.Items { - if fn.Labels["package"] == packageLabel { - referencing = append(referencing, fn.Name) - } - } - return referencing, nil -} - -// ValidateUpdate implements webhook.CustomValidator so a webhook will be registered for the type Packages. -func (v *PackagesCustomValidator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) { - return nil, nil -} - -// ValidateDelete implements webhook.CustomValidator so a webhook will be registered for the type Packages. 
-func (v *PackagesCustomValidator) ValidateDelete(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { - packages, ok := obj.(*fsv1alpha1.Package) - if !ok { - return nil, fmt.Errorf("expected a Packages object but got %T", obj) - } - packageslog.Info("Validation for Packages upon deletion", "name", packages.GetName()) - - if referencing, err := v.referencingFunctions(ctx, packages.Namespace, packages.Name); err != nil { - return nil, err - } else if len(referencing) > 0 { - return nil, fmt.Errorf("cannot delete Package '%s' because it is referenced by the following Functions in the same namespace: %v", packages.Name, referencing) - } - - return nil, nil -} diff --git a/operator/internal/webhook/v1alpha1/packages_webhook_test.go b/operator/internal/webhook/v1alpha1/packages_webhook_test.go deleted file mode 100644 index 21614ef7..00000000 --- a/operator/internal/webhook/v1alpha1/packages_webhook_test.go +++ /dev/null @@ -1,139 +0,0 @@ -/* -Copyright 2025. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" - - fsv1alpha1 "github.com/FunctionStream/function-stream/operator/api/v1alpha1" - // TODO (user): Add any additional imports if needed - "context" - "fmt" -) - -var _ = Describe("Packages Webhook", func() { - var ( - obj *fsv1alpha1.Package - oldObj *fsv1alpha1.Package - validator PackagesCustomValidator - defaulter PackagesCustomDefaulter - ctx context.Context - ) - - BeforeEach(func() { - ctx = context.Background() - obj = &fsv1alpha1.Package{} - oldObj = &fsv1alpha1.Package{} - obj.Name = "test-pkg" - obj.Namespace = "default" - obj.Spec.DisplayName = "test-pkg" - obj.Spec.Description = "desc" - obj.Spec.FunctionType = fsv1alpha1.FunctionType{} - obj.Spec.Modules = map[string]fsv1alpha1.Module{"mod": {DisplayName: "mod", Description: "desc"}} - oldObj.Name = obj.Name - oldObj.Namespace = obj.Namespace - oldObj.Spec = obj.Spec - validator = PackagesCustomValidator{Client: k8sClient} - Expect(validator).NotTo(BeNil(), "Expected validator to be initialized") - defaulter = PackagesCustomDefaulter{} - Expect(defaulter).NotTo(BeNil(), "Expected defaulter to be initialized") - Expect(oldObj).NotTo(BeNil(), "Expected oldObj to be initialized") - Expect(obj).NotTo(BeNil(), "Expected obj to be initialized") - // Clean up before each test - _ = k8sClient.Delete(ctx, obj) - }) - - AfterEach(func() { - _ = k8sClient.Delete(ctx, obj) - // TODO (user): Add any teardown logic common to all tests - }) - - Context("When creating Packages under Defaulting Webhook", func() { - // TODO (user): Add logic for defaulting webhooks - // Example: - // It("Should apply defaults when a required field is empty", func() { - // By("simulating a scenario where defaults should be applied") - // obj.SomeFieldWithDefault = "" - // By("calling the Default method to apply defaults") - // defaulter.Default(ctx, obj) - // By("checking that the default values are set") - // Expect(obj.SomeFieldWithDefault).To(Equal("default_value")) - // }) - }) - - Context("When creating or 
updating Packages under Validating Webhook", func() { - // TODO (user): Add logic for validating webhooks - // Example: - // It("Should deny creation if a required field is missing", func() { - // By("simulating an invalid creation scenario") - // obj.SomeRequiredField = "" - // Expect(validator.ValidateCreate(ctx, obj)).Error().To(HaveOccurred()) - // }) - // - // It("Should admit creation if all required fields are present", func() { - // By("simulating an invalid creation scenario") - // obj.SomeRequiredField = "valid_value" - // Expect(validator.ValidateCreate(ctx, obj)).To(BeNil()) - // }) - // - // It("Should validate updates correctly", func() { - // By("simulating a valid update scenario") - // oldObj.SomeRequiredField = "updated_value" - // obj.SomeRequiredField = "updated_value" - // Expect(validator.ValidateUpdate(ctx, oldObj, obj)).To(BeNil()) - // }) - }) - - Context("Validating Webhook for update/delete with referencing Functions", func() { - - It("should deny delete if multiple Functions reference the Package", func() { - Expect(k8sClient.Create(ctx, obj)).To(Succeed()) - // Create two referencing Functions - fn1 := &fsv1alpha1.Function{ - ObjectMeta: fsv1alpha1.Function{}.ObjectMeta, - Spec: fsv1alpha1.FunctionSpec{ - DisplayName: "fn1", - Description: "desc", - PackageRef: fsv1alpha1.PackageRef{Name: obj.Name}, - Module: "mod", - }, - } - fn1.Name = "fn1" - fn1.Namespace = obj.Namespace - fn1.Labels = map[string]string{"package": fmt.Sprintf("%s.%s", obj.Namespace, obj.Name)} - fn2 := fn1.DeepCopy() - fn2.Name = "fn2" - Expect(k8sClient.Create(ctx, fn1)).To(Succeed()) - Expect(k8sClient.Create(ctx, fn2)).To(Succeed()) - DeferCleanup(func() { _ = k8sClient.Delete(ctx, fn1); _ = k8sClient.Delete(ctx, fn2) }) - _, err := validator.ValidateDelete(ctx, obj) - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("referenced by the following Functions")) - Expect(err.Error()).To(ContainSubstring("fn1")) - 
Expect(err.Error()).To(ContainSubstring("fn2")) - }) - - It("should allow update and delete if no Function references the Package", func() { - Expect(k8sClient.Create(ctx, obj)).To(Succeed()) - _, err := validator.ValidateUpdate(ctx, oldObj, obj) - Expect(err).ToNot(HaveOccurred()) - _, err = validator.ValidateDelete(ctx, obj) - Expect(err).ToNot(HaveOccurred()) - }) - }) -}) diff --git a/operator/internal/webhook/v1alpha1/webhook_suite_test.go b/operator/internal/webhook/v1alpha1/webhook_suite_test.go deleted file mode 100644 index 3eae9fb2..00000000 --- a/operator/internal/webhook/v1alpha1/webhook_suite_test.go +++ /dev/null @@ -1,167 +0,0 @@ -/* -Copyright 2025. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "context" - "crypto/tls" - "fmt" - "net" - "os" - "path/filepath" - "testing" - "time" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - - "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/rest" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/envtest" - logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/log/zap" - metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" - "sigs.k8s.io/controller-runtime/pkg/webhook" - - fsv1alpha1 "github.com/FunctionStream/function-stream/operator/api/v1alpha1" - // +kubebuilder:scaffold:imports -) - -// These tests use Ginkgo (BDD-style Go testing framework). 
Refer to -// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. - -var ( - ctx context.Context - cancel context.CancelFunc - k8sClient client.Client - cfg *rest.Config - testEnv *envtest.Environment -) - -func TestAPIs(t *testing.T) { - RegisterFailHandler(Fail) - - RunSpecs(t, "Webhook Suite") -} - -var _ = BeforeSuite(func() { - logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) - - ctx, cancel = context.WithCancel(context.TODO()) - - var err error - err = fsv1alpha1.AddToScheme(scheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - - // +kubebuilder:scaffold:scheme - - By("bootstrapping test environment") - testEnv = &envtest.Environment{ - CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "crd", "bases")}, - ErrorIfCRDPathMissing: false, - - WebhookInstallOptions: envtest.WebhookInstallOptions{ - Paths: []string{filepath.Join("..", "..", "..", "config", "webhook")}, - }, - } - - // Retrieve the first found binary directory to allow running tests from IDEs - if getFirstFoundEnvTestBinaryDir() != "" { - testEnv.BinaryAssetsDirectory = getFirstFoundEnvTestBinaryDir() - } - - // cfg is defined in this file globally. - cfg, err = testEnv.Start() - Expect(err).NotTo(HaveOccurred()) - Expect(cfg).NotTo(BeNil()) - - k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) - Expect(err).NotTo(HaveOccurred()) - Expect(k8sClient).NotTo(BeNil()) - - // start webhook server using Manager. 
- webhookInstallOptions := &testEnv.WebhookInstallOptions - mgr, err := ctrl.NewManager(cfg, ctrl.Options{ - Scheme: scheme.Scheme, - WebhookServer: webhook.NewServer(webhook.Options{ - Host: webhookInstallOptions.LocalServingHost, - Port: webhookInstallOptions.LocalServingPort, - CertDir: webhookInstallOptions.LocalServingCertDir, - }), - LeaderElection: false, - Metrics: metricsserver.Options{BindAddress: "0"}, - }) - Expect(err).NotTo(HaveOccurred()) - - err = SetupFunctionWebhookWithManager(mgr) - Expect(err).NotTo(HaveOccurred()) - - err = SetupPackagesWebhookWithManager(mgr) - Expect(err).NotTo(HaveOccurred()) - - // +kubebuilder:scaffold:webhook - - go func() { - defer GinkgoRecover() - err = mgr.Start(ctx) - Expect(err).NotTo(HaveOccurred()) - }() - - // wait for the webhook server to get ready. - dialer := &net.Dialer{Timeout: time.Second} - addrPort := fmt.Sprintf("%s:%d", webhookInstallOptions.LocalServingHost, webhookInstallOptions.LocalServingPort) - Eventually(func() error { - conn, err := tls.DialWithDialer(dialer, "tcp", addrPort, &tls.Config{InsecureSkipVerify: true}) - if err != nil { - return err - } - - return conn.Close() - }).Should(Succeed()) -}) - -var _ = AfterSuite(func() { - By("tearing down the test environment") - cancel() - err := testEnv.Stop() - Expect(err).NotTo(HaveOccurred()) -}) - -// getFirstFoundEnvTestBinaryDir locates the first binary in the specified path. -// ENVTEST-based tests depend on specific binaries, usually located in paths set by -// controller-runtime. When running tests directly (e.g., via an IDE) without using -// Makefile targets, the 'BinaryAssetsDirectory' must be explicitly configured. -// -// This function streamlines the process by finding the required binaries, similar to -// setting the 'KUBEBUILDER_ASSETS' environment variable. To ensure the binaries are -// properly set up, run 'make setup-envtest' beforehand. 
-func getFirstFoundEnvTestBinaryDir() string { - basePath := filepath.Join("..", "..", "..", "bin", "k8s") - entries, err := os.ReadDir(basePath) - if err != nil { - logf.Log.Error(err, "Failed to read directory", "path", basePath) - return "" - } - for _, entry := range entries { - if entry.IsDir() { - return filepath.Join(basePath, entry.Name()) - } - } - return "" -} diff --git a/operator/scripts/deploy.yaml b/operator/scripts/deploy.yaml deleted file mode 100644 index 94508835..00000000 --- a/operator/scripts/deploy.yaml +++ /dev/null @@ -1,1137 +0,0 @@ ---- -# Source: operator/templates/namespace/namespace.yaml -apiVersion: v1 -kind: Namespace -metadata: - name: function-stream ---- -# Source: operator/templates/rbac/service_account.yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - app.kubernetes.io/version: "0.1.0" - helm.sh/chart: "0.1.0" - app.kubernetes.io/name: operator - app.kubernetes.io/instance: functionstream - app.kubernetes.io/managed-by: Helm - name: functionstream-operator - namespace: function-stream ---- -# Source: operator/templates/crd/fs.functionstream.github.io_functions.yaml -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - labels: - app.kubernetes.io/version: "0.1.0" - helm.sh/chart: "0.1.0" - app.kubernetes.io/name: operator - app.kubernetes.io/instance: functionstream - app.kubernetes.io/managed-by: Helm - annotations: - "helm.sh/resource-policy": keep - controller-gen.kubebuilder.io/version: v0.17.2 - name: functions.fs.functionstream.github.io -spec: - group: fs.functionstream.github.io - names: - kind: Function - listKind: FunctionList - plural: functions - singular: function - scope: Namespaced - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: Function is the Schema for the functions API. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. 
- Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: FunctionSpec defines the desired state of Function - properties: - config: - additionalProperties: - x-kubernetes-preserve-unknown-fields: true - description: Configurations as key-value pairs - type: object - description: - description: Description of the function - type: string - displayName: - description: Display name of the function - type: string - module: - description: Module name - type: string - package: - description: Package name - type: string - replicas: - default: 1 - description: Number of replicas for the function deployment - format: int32 - type: integer - requestSource: - description: Request source - properties: - pulsar: - description: Pulsar source specification - properties: - topic: - description: Topic name - type: string - required: - - topic - type: object - type: object - sink: - description: Sink specifies the sink configuration - properties: - pulsar: - description: Pulsar sink specification - properties: - topic: - description: Topic name - type: string - required: - - topic - type: object - type: object - sources: - description: List of sources - items: - description: SourceSpec defines a source or sink specification - properties: - pulsar: - description: Pulsar source specification - properties: - topic: - description: Topic name - type: string - required: - - topic - type: object - type: object - type: array - 
subscriptionName: - type: string - required: - - module - - package - type: object - status: - description: FunctionStatus defines the observed state of Function - properties: - availableReplicas: - description: Number of available pods (ready for at least minReadySeconds) - format: int32 - type: integer - observedGeneration: - description: Most recent generation observed for this Function - format: int64 - type: integer - readyReplicas: - description: Total number of ready pods - format: int32 - type: integer - replicas: - description: Total number of non-terminated pods targeted by this - deployment - format: int32 - type: integer - updatedReplicas: - description: Total number of updated pods - format: int32 - type: integer - type: object - type: object - served: true - storage: true - subresources: - status: {} ---- -# Source: operator/templates/crd/fs.functionstream.github.io_packages.yaml -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - labels: - app.kubernetes.io/version: "0.1.0" - helm.sh/chart: "0.1.0" - app.kubernetes.io/name: operator - app.kubernetes.io/instance: functionstream - app.kubernetes.io/managed-by: Helm - annotations: - "helm.sh/resource-policy": keep - controller-gen.kubebuilder.io/version: v0.17.2 - name: packages.fs.functionstream.github.io -spec: - group: fs.functionstream.github.io - names: - kind: Package - listKind: PackageList - plural: packages - shortNames: - - pkg - singular: package - scope: Namespaced - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: Package is the Schema for the packages API. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. 
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: PackageSpec defines the desired state of Package - properties: - description: - description: Description provides additional information about the - package - type: string - displayName: - description: DisplayName is the human-readable name of the package - type: string - functionType: - description: FunctionType contains function type configuration - properties: - cloud: - description: Cloud contains cloud function package configuration - properties: - image: - description: Image specifies the container image for cloud - deployment - type: string - required: - - image - type: object - type: object - logo: - description: Logo is the URL or base64 encoded image for the package - logo - type: string - modules: - additionalProperties: - description: Module defines a module within a package - properties: - config: - additionalProperties: - description: ConfigItem defines a configuration item for a - module - properties: - description: - description: Description provides additional information - about the config item - type: string - displayName: - description: DisplayName is the human-readable name of - the config item - type: string - required: - description: Required indicates whether this config item - is mandatory - type: boolean - type: - description: Type specifies the data type of the config - item - type: string - type: object - description: Config is a list of configuration items for the - module - type: object - description: - description: Description 
provides additional information about - the module - type: string - displayName: - description: DisplayName is the human-readable name of the module - type: string - sinkSchema: - description: SinkSchema defines the output schema for the module - type: string - sourceSchema: - description: SourceSchema defines the input schema for the module - type: string - type: object - description: Modules is a map of module names to their configurations - type: object - required: - - functionType - - modules - type: object - status: - description: PackageStatus defines the observed state of Package. - type: object - type: object - served: true - storage: true - subresources: - status: {} ---- -# Source: operator/templates/rbac/function_admin_role.yaml -# This rule is not used by the project operator itself. -# It is provided to allow the cluster admin to help manage permissions for users. -# -# Grants full permissions ('*') over fs.functionstream.github.io. -# This role is intended for users authorized to modify roles and bindings within the cluster, -# enabling them to delegate specific permissions to other users or groups as needed. - -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/version: "0.1.0" - helm.sh/chart: "0.1.0" - app.kubernetes.io/name: operator - app.kubernetes.io/instance: functionstream - app.kubernetes.io/managed-by: Helm - name: function-admin-role -rules: -- apiGroups: - - fs.functionstream.github.io - resources: - - functions - verbs: - - '*' -- apiGroups: - - fs.functionstream.github.io - resources: - - functions/status - verbs: - - get ---- -# Source: operator/templates/rbac/function_editor_role.yaml -# This rule is not used by the project operator itself. -# It is provided to allow the cluster admin to help manage permissions for users. -# -# Grants permissions to create, update, and delete resources within the fs.functionstream.github.io. 
-# This role is intended for users who need to manage these resources -# but should not control RBAC or manage permissions for others. - -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/version: "0.1.0" - helm.sh/chart: "0.1.0" - app.kubernetes.io/name: operator - app.kubernetes.io/instance: functionstream - app.kubernetes.io/managed-by: Helm - name: function-editor-role -rules: -- apiGroups: - - fs.functionstream.github.io - resources: - - functions - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - fs.functionstream.github.io - resources: - - functions/status - verbs: - - get ---- -# Source: operator/templates/rbac/function_viewer_role.yaml -# This rule is not used by the project operator itself. -# It is provided to allow the cluster admin to help manage permissions for users. -# -# Grants read-only access to fs.functionstream.github.io resources. -# This role is intended for users who need visibility into these resources -# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing. 
- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/version: "0.1.0" - helm.sh/chart: "0.1.0" - app.kubernetes.io/name: operator - app.kubernetes.io/instance: functionstream - app.kubernetes.io/managed-by: Helm - name: function-viewer-role -rules: -- apiGroups: - - fs.functionstream.github.io - resources: - - functions - verbs: - - get - - list - - watch -- apiGroups: - - fs.functionstream.github.io - resources: - - functions/status - verbs: - - get ---- -# Source: operator/templates/rbac/metrics_auth_role.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/version: "0.1.0" - helm.sh/chart: "0.1.0" - app.kubernetes.io/name: operator - app.kubernetes.io/instance: functionstream - app.kubernetes.io/managed-by: Helm - name: fs-operator-metrics-auth-role -rules: -- apiGroups: - - authentication.k8s.io - resources: - - tokenreviews - verbs: - - create -- apiGroups: - - authorization.k8s.io - resources: - - subjectaccessreviews - verbs: - - create ---- -# Source: operator/templates/rbac/metrics_reader_role.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/version: "0.1.0" - helm.sh/chart: "0.1.0" - app.kubernetes.io/name: operator - app.kubernetes.io/instance: functionstream - app.kubernetes.io/managed-by: Helm - name: fs-operator-metrics-reader -rules: -- nonResourceURLs: - - "/metrics" - verbs: - - get ---- -# Source: operator/templates/rbac/packages_admin_role.yaml -# This rule is not used by the project operator itself. -# It is provided to allow the cluster admin to help manage permissions for users. -# -# Grants full permissions ('*') over fs.functionstream.github.io. -# This role is intended for users authorized to modify roles and bindings within the cluster, -# enabling them to delegate specific permissions to other users or groups as needed. 
- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/version: "0.1.0" - helm.sh/chart: "0.1.0" - app.kubernetes.io/name: operator - app.kubernetes.io/instance: functionstream - app.kubernetes.io/managed-by: Helm - name: packages-admin-role -rules: -- apiGroups: - - fs.functionstream.github.io - resources: - - packages - verbs: - - '*' -- apiGroups: - - fs.functionstream.github.io - resources: - - packages/status - verbs: - - get ---- -# Source: operator/templates/rbac/packages_editor_role.yaml -# This rule is not used by the project operator itself. -# It is provided to allow the cluster admin to help manage permissions for users. -# -# Grants permissions to create, update, and delete resources within the fs.functionstream.github.io. -# This role is intended for users who need to manage these resources -# but should not control RBAC or manage permissions for others. - -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/version: "0.1.0" - helm.sh/chart: "0.1.0" - app.kubernetes.io/name: operator - app.kubernetes.io/instance: functionstream - app.kubernetes.io/managed-by: Helm - name: packages-editor-role -rules: -- apiGroups: - - fs.functionstream.github.io - resources: - - packages - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - fs.functionstream.github.io - resources: - - packages/status - verbs: - - get ---- -# Source: operator/templates/rbac/packages_viewer_role.yaml -# This rule is not used by the project operator itself. -# It is provided to allow the cluster admin to help manage permissions for users. -# -# Grants read-only access to fs.functionstream.github.io resources. -# This role is intended for users who need visibility into these resources -# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing. 
- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/version: "0.1.0" - helm.sh/chart: "0.1.0" - app.kubernetes.io/name: operator - app.kubernetes.io/instance: functionstream - app.kubernetes.io/managed-by: Helm - name: packages-viewer-role -rules: -- apiGroups: - - fs.functionstream.github.io - resources: - - packages - verbs: - - get - - list - - watch -- apiGroups: - - fs.functionstream.github.io - resources: - - packages/status - verbs: - - get ---- -# Source: operator/templates/rbac/role.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/version: "0.1.0" - helm.sh/chart: "0.1.0" - app.kubernetes.io/name: operator - app.kubernetes.io/instance: functionstream - app.kubernetes.io/managed-by: Helm - name: fs-operator-manager-role -rules: -- apiGroups: - - apps - resources: - - deployments - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - fs.functionstream.github.io - resources: - - functions - - package - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - fs.functionstream.github.io - resources: - - functions/finalizers - - package/finalizers - verbs: - - update -- apiGroups: - - fs.functionstream.github.io - resources: - - functions/status - - package/status - verbs: - - get - - patch - - update -- apiGroups: - - fs.functionstream.github.io - resources: - - packages - verbs: - - get - - list - - watch ---- -# Source: operator/templates/rbac/metrics_auth_role_binding.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - labels: - app.kubernetes.io/version: "0.1.0" - helm.sh/chart: "0.1.0" - app.kubernetes.io/name: operator - app.kubernetes.io/instance: functionstream - app.kubernetes.io/managed-by: Helm - name: functionstream-fs-operator-metrics-auth-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: 
fs-operator-metrics-auth-role -subjects: -- kind: ServiceAccount - name: functionstream-operator - namespace: function-stream ---- -# Source: operator/templates/rbac/role_binding.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - labels: - app.kubernetes.io/version: "0.1.0" - helm.sh/chart: "0.1.0" - app.kubernetes.io/name: operator - app.kubernetes.io/instance: functionstream - app.kubernetes.io/managed-by: Helm - name: functionstream-fs-operator-manager-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: fs-operator-manager-role -subjects: -- kind: ServiceAccount - name: functionstream-operator - namespace: function-stream ---- -# Source: operator/templates/rbac/leader_election_role.yaml -# permissions to do leader election. -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - labels: - app.kubernetes.io/version: "0.1.0" - helm.sh/chart: "0.1.0" - app.kubernetes.io/name: operator - app.kubernetes.io/instance: functionstream - app.kubernetes.io/managed-by: Helm - namespace: function-stream - name: fs-operator-leader-election-role -rules: -- apiGroups: - - "" - resources: - - configmaps - verbs: - - get - - list - - watch - - create - - update - - patch - - delete -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - get - - list - - watch - - create - - update - - patch - - delete -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch ---- -# Source: operator/templates/rbac/leader_election_role_binding.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - labels: - app.kubernetes.io/version: "0.1.0" - helm.sh/chart: "0.1.0" - app.kubernetes.io/name: operator - app.kubernetes.io/instance: functionstream - app.kubernetes.io/managed-by: Helm - namespace: function-stream - name: functionstream-fs-operator-leader-election-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: 
fs-operator-leader-election-role -subjects: -- kind: ServiceAccount - name: functionstream-operator - namespace: function-stream ---- -# Source: operator/templates/metrics/metrics-service.yaml -apiVersion: v1 -kind: Service -metadata: - name: operator-controller-manager-metrics-service - namespace: function-stream - labels: - app.kubernetes.io/version: "0.1.0" - helm.sh/chart: "0.1.0" - app.kubernetes.io/name: operator - app.kubernetes.io/instance: functionstream - app.kubernetes.io/managed-by: Helm -spec: - ports: - - port: 8443 - targetPort: 8443 - protocol: TCP - name: https - selector: - control-plane: controller-manager ---- -# Source: operator/templates/pulsar/service.yaml -apiVersion: v1 -kind: Service -metadata: - name: functionstream-pulsar-standalone - namespace: function-stream - labels: - app.kubernetes.io/version: "0.1.0" - helm.sh/chart: "0.1.0" - app.kubernetes.io/name: operator - app.kubernetes.io/instance: functionstream - app.kubernetes.io/managed-by: Helm - app: pulsar-standalone -spec: - type: ClusterIP - ports: - - name: pulsar - port: 6650 - targetPort: 6650 - protocol: TCP - - name: admin - port: 8080 - targetPort: 8080 - protocol: TCP - selector: - app.kubernetes.io/name: operator - app.kubernetes.io/instance: functionstream - app: pulsar-standalone ---- -# Source: operator/templates/webhook/service.yaml -apiVersion: v1 -kind: Service -metadata: - name: operator-webhook-service - namespace: function-stream - labels: - app.kubernetes.io/version: "0.1.0" - helm.sh/chart: "0.1.0" - app.kubernetes.io/name: operator - app.kubernetes.io/instance: functionstream - app.kubernetes.io/managed-by: Helm -spec: - ports: - - port: 443 - protocol: TCP - targetPort: 9443 - selector: - control-plane: controller-manager ---- -# Source: operator/templates/manager/manager.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: function-stream - namespace: function-stream - labels: - app.kubernetes.io/version: "0.1.0" - helm.sh/chart: "0.1.0" - 
app.kubernetes.io/name: operator - app.kubernetes.io/instance: functionstream - app.kubernetes.io/managed-by: Helm - control-plane: controller-manager -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: operator - app.kubernetes.io/instance: functionstream - control-plane: controller-manager - template: - metadata: - annotations: - kubectl.kubernetes.io/default-container: manager - labels: - app.kubernetes.io/version: "0.1.0" - helm.sh/chart: "0.1.0" - app.kubernetes.io/name: operator - app.kubernetes.io/instance: functionstream - app.kubernetes.io/managed-by: Helm - control-plane: controller-manager - spec: - containers: - - name: manager - args: - - --leader-elect - - --metrics-bind-address=:8443 - - --health-probe-bind-address=:8081 - command: - - /manager - image: functionstream/operator:latest - imagePullPolicy: IfNotPresent - env: - - name: PULSAR_SERVICE_URL - value: pulsar://functionstream-pulsar-standalone.function-stream.svc.cluster.local:6650 - livenessProbe: - httpGet: - path: /healthz - port: 8081 - initialDelaySeconds: 15 - periodSeconds: 20 - readinessProbe: - httpGet: - path: /readyz - port: 8081 - initialDelaySeconds: 5 - periodSeconds: 10 - ports: - - containerPort: 9443 - name: webhook-server - protocol: TCP - resources: - limits: - cpu: 500m - memory: 128Mi - requests: - cpu: 10m - memory: 64Mi - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - volumeMounts: - - name: webhook-cert - mountPath: /tmp/k8s-webhook-server/serving-certs - readOnly: true - - name: metrics-certs - mountPath: /tmp/k8s-metrics-server/metrics-certs - readOnly: true - securityContext: - runAsNonRoot: true - seccompProfile: - type: RuntimeDefault - serviceAccountName: functionstream-operator - terminationGracePeriodSeconds: 10 - volumes: - - name: webhook-cert - secret: - secretName: webhook-server-cert - - name: metrics-certs - secret: - secretName: metrics-server-cert ---- -# Source: 
operator/templates/pulsar/statefulset.yaml -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: functionstream-pulsar-standalone - namespace: function-stream - labels: - app.kubernetes.io/version: "0.1.0" - helm.sh/chart: "0.1.0" - app.kubernetes.io/name: operator - app.kubernetes.io/instance: functionstream - app.kubernetes.io/managed-by: Helm - app: pulsar-standalone - app.kubernetes.io/component: messaging -spec: - serviceName: functionstream-pulsar-standalone - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: operator - app.kubernetes.io/instance: functionstream - app: pulsar-standalone - template: - metadata: - labels: - app.kubernetes.io/version: "0.1.0" - helm.sh/chart: "0.1.0" - app.kubernetes.io/name: operator - app.kubernetes.io/instance: functionstream - app.kubernetes.io/managed-by: Helm - app: pulsar-standalone - app.kubernetes.io/component: messaging - spec: - containers: - - name: pulsar - image: apachepulsar/pulsar:latest - command: - - sh - - -c - - | - # Initialize Pulsar standalone - bin/pulsar standalone -nfw -nss - ports: - - name: pulsar - containerPort: 6650 - protocol: TCP - - name: admin - containerPort: 8080 - protocol: TCP - resources: - requests: - cpu: 500m - memory: 1Gi - livenessProbe: - httpGet: - path: /admin/v2/brokers/health - port: 8080 - initialDelaySeconds: 60 - periodSeconds: 30 - timeoutSeconds: 5 - failureThreshold: 3 - readinessProbe: - httpGet: - path: /admin/v2/brokers/health - port: 8080 - initialDelaySeconds: 30 - periodSeconds: 10 - timeoutSeconds: 3 - failureThreshold: 3 ---- -# Source: operator/templates/prometheus/monitor.yaml -# To integrate with Prometheus. 
---- -# Source: operator/templates/certmanager/certificate.yaml -# Certificate for the webhook -apiVersion: cert-manager.io/v1 -kind: Certificate -metadata: - annotations: - "helm.sh/resource-policy": keep - name: serving-cert - namespace: function-stream - labels: - app.kubernetes.io/version: "0.1.0" - helm.sh/chart: "0.1.0" - app.kubernetes.io/name: operator - app.kubernetes.io/instance: functionstream - app.kubernetes.io/managed-by: Helm -spec: - dnsNames: - - operator.function-stream.svc - - operator.function-stream.svc.cluster.local - - operator-webhook-service.function-stream.svc - issuerRef: - kind: Issuer - name: selfsigned-issuer - secretName: webhook-server-cert ---- -# Source: operator/templates/certmanager/certificate.yaml -# Certificate for the metrics -apiVersion: cert-manager.io/v1 -kind: Certificate -metadata: - annotations: - "helm.sh/resource-policy": keep - labels: - app.kubernetes.io/version: "0.1.0" - helm.sh/chart: "0.1.0" - app.kubernetes.io/name: operator - app.kubernetes.io/instance: functionstream - app.kubernetes.io/managed-by: Helm - name: metrics-certs - namespace: function-stream -spec: - dnsNames: - - operator.function-stream.svc - - operator.function-stream.svc.cluster.local - - operator-metrics-service.function-stream.svc - issuerRef: - kind: Issuer - name: selfsigned-issuer - secretName: metrics-server-cert ---- -# Source: operator/templates/certmanager/certificate.yaml -# Self-signed Issuer -apiVersion: cert-manager.io/v1 -kind: Issuer -metadata: - labels: - app.kubernetes.io/version: "0.1.0" - helm.sh/chart: "0.1.0" - app.kubernetes.io/name: operator - app.kubernetes.io/instance: functionstream - app.kubernetes.io/managed-by: Helm - name: selfsigned-issuer - namespace: function-stream -spec: - selfSigned: {} ---- -# Source: operator/templates/webhook/webhooks.yaml -apiVersion: admissionregistration.k8s.io/v1 -kind: MutatingWebhookConfiguration -metadata: - name: operator-mutating-webhook-configuration - namespace: function-stream 
- annotations: - cert-manager.io/inject-ca-from: "function-stream/serving-cert" - labels: - app.kubernetes.io/version: "0.1.0" - helm.sh/chart: "0.1.0" - app.kubernetes.io/name: operator - app.kubernetes.io/instance: functionstream - app.kubernetes.io/managed-by: Helm -webhooks: - - name: mfunction-v1alpha1.kb.io - clientConfig: - service: - name: operator-webhook-service - namespace: function-stream - path: /mutate-fs-functionstream-github-io-v1alpha1-function - failurePolicy: Fail - sideEffects: None - admissionReviewVersions: - - v1 - rules: - - operations: - - CREATE - - UPDATE - apiGroups: - - fs.functionstream.github.io - apiVersions: - - v1alpha1 - resources: - - functions - - name: mpackage-v1alpha1.kb.io - clientConfig: - service: - name: operator-webhook-service - namespace: function-stream - path: /mutate-fs-functionstream-github-io-v1alpha1-package - failurePolicy: Fail - sideEffects: None - admissionReviewVersions: - - v1 - rules: - - operations: - - CREATE - - UPDATE - apiGroups: - - fs.functionstream.github.io - apiVersions: - - v1alpha1 - resources: - - packages ---- -# Source: operator/templates/webhook/webhooks.yaml -apiVersion: admissionregistration.k8s.io/v1 -kind: ValidatingWebhookConfiguration -metadata: - name: operator-validating-webhook-configuration - namespace: function-stream - annotations: - cert-manager.io/inject-ca-from: "function-stream/serving-cert" - labels: - app.kubernetes.io/version: "0.1.0" - helm.sh/chart: "0.1.0" - app.kubernetes.io/name: operator - app.kubernetes.io/instance: functionstream - app.kubernetes.io/managed-by: Helm -webhooks: - - name: vfunction-v1alpha1.kb.io - clientConfig: - service: - name: operator-webhook-service - namespace: function-stream - path: /validate-fs-functionstream-github-io-v1alpha1-function - failurePolicy: Fail - sideEffects: None - admissionReviewVersions: - - v1 - rules: - - operations: - - CREATE - - UPDATE - - DELETE - apiGroups: - - fs.functionstream.github.io - apiVersions: - - v1alpha1 
- resources: - - functions - - name: vpackage-v1alpha1.kb.io - clientConfig: - service: - name: operator-webhook-service - namespace: function-stream - path: /validate-fs-functionstream-github-io-v1alpha1-package - failurePolicy: Fail - sideEffects: None - admissionReviewVersions: - - v1 - rules: - - operations: - - CREATE - - UPDATE - - DELETE - apiGroups: - - fs.functionstream.github.io - apiVersions: - - v1alpha1 - resources: - - packages diff --git a/operator/scripts/install-cert-manager.sh b/operator/scripts/install-cert-manager.sh deleted file mode 100755 index f61fae5d..00000000 --- a/operator/scripts/install-cert-manager.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/bin/bash - -# FunctionStream Operator - cert-manager installation script -# This script installs cert-manager which is required for the operator to work properly - -set -e - -echo "FunctionStream Operator - cert-manager installation script" -echo "==========================================================" - -# Check if kubectl is available -if ! command -v kubectl &> /dev/null; then - echo "Error: kubectl is not installed or not in PATH" - exit 1 -fi - -# Check if we can connect to the cluster -if ! kubectl cluster-info &> /dev/null; then - echo "Error: Cannot connect to Kubernetes cluster" - exit 1 -fi - -echo "Installing cert-manager..." - -# Install cert-manager -kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.13.0/cert-manager.yaml - -echo "Waiting for cert-manager to be ready..." - -# Wait for cert-manager namespace to be created -kubectl wait --for=jsonpath='{.status.phase}=Active' namespace/cert-manager --timeout=60s - -# Wait for cert-manager pods to be ready -kubectl wait --for=jsonpath='{.status.phase}=Running' pods -l app.kubernetes.io/instance=cert-manager -n cert-manager --timeout=300s - - -echo "cert-manager installation completed successfully!" 
-echo "" -echo "You can now install the FunctionStream operator:" -echo " helm install fs ./deploy/chart -n fs --create-namespace" -echo "" -echo "Or if you want to install with Pulsar standalone:" -echo " helm install fs ./deploy/chart --set pulsar.standalone.enable=true -n fs --create-namespace" \ No newline at end of file diff --git a/operator/test/e2e/e2e_suite_test.go b/operator/test/e2e/e2e_suite_test.go deleted file mode 100644 index afc21396..00000000 --- a/operator/test/e2e/e2e_suite_test.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright 2025. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package e2e - -import ( - "fmt" - "os" - "os/exec" - "testing" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - - "github.com/FunctionStream/function-stream/operator/test/utils" -) - -var ( - // Optional Environment Variables: - // - CERT_MANAGER_INSTALL_SKIP=true: Skips CertManager installation during test setup. - // These variables are useful if CertManager is already installed, avoiding - // re-installation and conflicts. - skipCertManagerInstall = os.Getenv("CERT_MANAGER_INSTALL_SKIP") == "true" - // isCertManagerAlreadyInstalled will be set true when CertManager CRDs be found on the cluster - isCertManagerAlreadyInstalled = false - - // projectImage is the name of the image which will be build and loaded - // with the code source changes to be tested. 
- projectImage = "example.com/operator:v0.0.1" -) - -// TestE2E runs the end-to-end (e2e) test suite for the project. These tests execute in an isolated, -// temporary environment to validate project changes with the purposed to be used in CI jobs. -// The default setup requires Kind, builds/loads the Manager Docker image locally, and installs -// CertManager. -func TestE2E(t *testing.T) { - RegisterFailHandler(Fail) - _, _ = fmt.Fprintf(GinkgoWriter, "Starting operator integration test suite\n") - RunSpecs(t, "e2e suite") -} - -var _ = BeforeSuite(func() { - By("building the manager(Operator) image") - cmd := exec.Command("make", "docker-build", fmt.Sprintf("IMG=%s", projectImage)) - _, err := utils.Run(cmd) - ExpectWithOffset(1, err).NotTo(HaveOccurred(), "Failed to build the manager(Operator) image") - - // TODO(user): If you want to change the e2e test vendor from Kind, ensure the image is - // built and available before running the tests. Also, remove the following block. - By("loading the manager(Operator) image on Kind") - err = utils.LoadImageToKindClusterWithName(projectImage) - ExpectWithOffset(1, err).NotTo(HaveOccurred(), "Failed to load the manager(Operator) image into Kind") - - // The tests-e2e are intended to run on a temporary cluster that is created and destroyed for testing. - // To prevent errors when tests run in environments with CertManager already installed, - // we check for its presence before execution. - // Setup CertManager before the suite if not skipped and if not already installed - if !skipCertManagerInstall { - By("checking if cert manager is installed already") - isCertManagerAlreadyInstalled = utils.IsCertManagerCRDsInstalled() - if !isCertManagerAlreadyInstalled { - _, _ = fmt.Fprintf(GinkgoWriter, "Installing CertManager...\n") - Expect(utils.InstallCertManager()).To(Succeed(), "Failed to install CertManager") - } else { - _, _ = fmt.Fprintf(GinkgoWriter, "WARNING: CertManager is already installed. 
Skipping installation...\n") - } - } -}) - -var _ = AfterSuite(func() { - // Teardown CertManager after the suite if not skipped and if it was not already installed - if !skipCertManagerInstall && !isCertManagerAlreadyInstalled { - _, _ = fmt.Fprintf(GinkgoWriter, "Uninstalling CertManager...\n") - utils.UninstallCertManager() - } -}) diff --git a/operator/test/e2e/e2e_test.go b/operator/test/e2e/e2e_test.go deleted file mode 100644 index 30b55f7f..00000000 --- a/operator/test/e2e/e2e_test.go +++ /dev/null @@ -1,367 +0,0 @@ -/* -Copyright 2025. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package e2e - -import ( - "encoding/json" - "fmt" - "os" - "os/exec" - "path/filepath" - "time" - - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" - - "github.com/FunctionStream/function-stream/operator/test/utils" -) - -// namespace where the project is deployed in -const namespace = "operator-system" - -// serviceAccountName created for the project -const serviceAccountName = "operator-controller-manager" - -// metricsServiceName is the name of the metrics service of the project -const metricsServiceName = "operator-controller-manager-metrics-service" - -// metricsRoleBindingName is the name of the RBAC that will be created to allow get the metrics data -const metricsRoleBindingName = "operator-metrics-binding" - -var _ = Describe("Manager", Ordered, func() { - var controllerPodName string - - // Before running the tests, set up the environment by creating the namespace, - // enforce the restricted security policy to the namespace, installing CRDs, - // and deploying the controller. - BeforeAll(func() { - By("creating manager namespace") - cmd := exec.Command("kubectl", "create", "ns", namespace) - _, err := utils.Run(cmd) - Expect(err).NotTo(HaveOccurred(), "Failed to create namespace") - - By("labeling the namespace to enforce the restricted security policy") - cmd = exec.Command("kubectl", "label", "--overwrite", "ns", namespace, - "pod-security.kubernetes.io/enforce=restricted") - _, err = utils.Run(cmd) - Expect(err).NotTo(HaveOccurred(), "Failed to label namespace with restricted policy") - - By("installing CRDs") - cmd = exec.Command("make", "install") - _, err = utils.Run(cmd) - Expect(err).NotTo(HaveOccurred(), "Failed to install CRDs") - - By("deploying the controller-manager") - cmd = exec.Command("make", "deploy", fmt.Sprintf("IMG=%s", projectImage)) - _, err = utils.Run(cmd) - Expect(err).NotTo(HaveOccurred(), "Failed to deploy the controller-manager") - }) - - // After all tests have been executed, clean up by undeploying the controller, uninstalling CRDs, - // and deleting the namespace. 
- AfterAll(func() { - By("cleaning up the curl pod for metrics") - cmd := exec.Command("kubectl", "delete", "pod", "curl-metrics", "-n", namespace) - _, _ = utils.Run(cmd) - - By("undeploying the controller-manager") - cmd = exec.Command("make", "undeploy") - _, _ = utils.Run(cmd) - - By("uninstalling CRDs") - cmd = exec.Command("make", "uninstall") - _, _ = utils.Run(cmd) - - By("removing manager namespace") - cmd = exec.Command("kubectl", "delete", "ns", namespace) - _, _ = utils.Run(cmd) - }) - - // After each test, check for failures and collect logs, events, - // and pod descriptions for debugging. - AfterEach(func() { - specReport := CurrentSpecReport() - if specReport.Failed() { - By("Fetching controller manager pod logs") - cmd := exec.Command("kubectl", "logs", controllerPodName, "-n", namespace) - controllerLogs, err := utils.Run(cmd) - if err == nil { - _, _ = fmt.Fprintf(GinkgoWriter, "Controller logs:\n %s", controllerLogs) - } else { - _, _ = fmt.Fprintf(GinkgoWriter, "Failed to get Controller logs: %s", err) - } - - By("Fetching Kubernetes events") - cmd = exec.Command("kubectl", "get", "events", "-n", namespace, "--sort-by=.lastTimestamp") - eventsOutput, err := utils.Run(cmd) - if err == nil { - _, _ = fmt.Fprintf(GinkgoWriter, "Kubernetes events:\n%s", eventsOutput) - } else { - _, _ = fmt.Fprintf(GinkgoWriter, "Failed to get Kubernetes events: %s", err) - } - - By("Fetching curl-metrics logs") - cmd = exec.Command("kubectl", "logs", "curl-metrics", "-n", namespace) - metricsOutput, err := utils.Run(cmd) - if err == nil { - _, _ = fmt.Fprintf(GinkgoWriter, "Metrics logs:\n %s", metricsOutput) - } else { - _, _ = fmt.Fprintf(GinkgoWriter, "Failed to get curl-metrics logs: %s", err) - } - - By("Fetching controller manager pod description") - cmd = exec.Command("kubectl", "describe", "pod", controllerPodName, "-n", namespace) - podDescription, err := utils.Run(cmd) - if err == nil { - fmt.Println("Pod description:\n", podDescription) - } else { - 
fmt.Println("Failed to describe controller pod") - } - } - }) - - SetDefaultEventuallyTimeout(2 * time.Minute) - SetDefaultEventuallyPollingInterval(time.Second) - - Context("Manager", func() { - It("should run successfully", func() { - By("validating that the controller-manager pod is running as expected") - verifyControllerUp := func(g Gomega) { - // Get the name of the controller-manager pod - cmd := exec.Command("kubectl", "get", - "pods", "-l", "control-plane=controller-manager", - "-o", "go-template={{ range .items }}"+ - "{{ if not .metadata.deletionTimestamp }}"+ - "{{ .metadata.name }}"+ - "{{ \"\\n\" }}{{ end }}{{ end }}", - "-n", namespace, - ) - - podOutput, err := utils.Run(cmd) - g.Expect(err).NotTo(HaveOccurred(), "Failed to retrieve controller-manager pod information") - podNames := utils.GetNonEmptyLines(podOutput) - g.Expect(podNames).To(HaveLen(1), "expected 1 controller pod running") - controllerPodName = podNames[0] - g.Expect(controllerPodName).To(ContainSubstring("controller-manager")) - - // Validate the pod's status - cmd = exec.Command("kubectl", "get", - "pods", controllerPodName, "-o", "jsonpath={.status.phase}", - "-n", namespace, - ) - output, err := utils.Run(cmd) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(output).To(Equal("Running"), "Incorrect controller-manager pod status") - } - Eventually(verifyControllerUp).Should(Succeed()) - }) - - It("should ensure the metrics endpoint is serving metrics", func() { - By("creating a ClusterRoleBinding for the service account to allow access to metrics") - cmd := exec.Command("kubectl", "create", "clusterrolebinding", metricsRoleBindingName, - "--clusterrole=operator-metrics-reader", - fmt.Sprintf("--serviceaccount=%s:%s", namespace, serviceAccountName), - ) - _, err := utils.Run(cmd) - Expect(err).NotTo(HaveOccurred(), "Failed to create ClusterRoleBinding") - - By("validating that the metrics service is available") - cmd = exec.Command("kubectl", "get", "service", metricsServiceName, 
"-n", namespace) - _, err = utils.Run(cmd) - Expect(err).NotTo(HaveOccurred(), "Metrics service should exist") - - By("getting the service account token") - token, err := serviceAccountToken() - Expect(err).NotTo(HaveOccurred()) - Expect(token).NotTo(BeEmpty()) - - By("waiting for the metrics endpoint to be ready") - verifyMetricsEndpointReady := func(g Gomega) { - cmd := exec.Command("kubectl", "get", "endpoints", metricsServiceName, "-n", namespace) - output, err := utils.Run(cmd) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(output).To(ContainSubstring("8443"), "Metrics endpoint is not ready") - } - Eventually(verifyMetricsEndpointReady).Should(Succeed()) - - By("verifying that the controller manager is serving the metrics server") - verifyMetricsServerStarted := func(g Gomega) { - cmd := exec.Command("kubectl", "logs", controllerPodName, "-n", namespace) - output, err := utils.Run(cmd) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(output).To(ContainSubstring("controller-runtime.metrics\tServing metrics server"), - "Metrics server not yet started") - } - Eventually(verifyMetricsServerStarted).Should(Succeed()) - - By("creating the curl-metrics pod to access the metrics endpoint") - cmd = exec.Command("kubectl", "run", "curl-metrics", "--restart=Never", - "--namespace", namespace, - "--image=curlimages/curl:latest", - "--overrides", - fmt.Sprintf(`{ - "spec": { - "containers": [{ - "name": "curl", - "image": "curlimages/curl:latest", - "command": ["/bin/sh", "-c"], - "args": ["curl -v -k -H 'Authorization: Bearer %s' https://%s.%s.svc.cluster.local:8443/metrics"], - "securityContext": { - "allowPrivilegeEscalation": false, - "capabilities": { - "drop": ["ALL"] - }, - "runAsNonRoot": true, - "runAsUser": 1000, - "seccompProfile": { - "type": "RuntimeDefault" - } - } - }], - "serviceAccount": "%s" - } - }`, token, metricsServiceName, namespace, serviceAccountName)) - _, err = utils.Run(cmd) - Expect(err).NotTo(HaveOccurred(), "Failed to create curl-metrics 
pod") - - By("waiting for the curl-metrics pod to complete.") - verifyCurlUp := func(g Gomega) { - cmd := exec.Command("kubectl", "get", "pods", "curl-metrics", - "-o", "jsonpath={.status.phase}", - "-n", namespace) - output, err := utils.Run(cmd) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(output).To(Equal("Succeeded"), "curl pod in wrong status") - } - Eventually(verifyCurlUp, 5*time.Minute).Should(Succeed()) - - By("getting the metrics by checking curl-metrics logs") - metricsOutput := getMetricsOutput() - Expect(metricsOutput).To(ContainSubstring( - "controller_runtime_reconcile_total", - )) - }) - - It("should provisioned cert-manager", func() { - By("validating that cert-manager has the certificate Secret") - verifyCertManager := func(g Gomega) { - cmd := exec.Command("kubectl", "get", "secrets", "webhook-server-cert", "-n", namespace) - _, err := utils.Run(cmd) - g.Expect(err).NotTo(HaveOccurred()) - } - Eventually(verifyCertManager).Should(Succeed()) - }) - - It("should have CA injection for mutating webhooks", func() { - By("checking CA injection for mutating webhooks") - verifyCAInjection := func(g Gomega) { - cmd := exec.Command("kubectl", "get", - "mutatingwebhookconfigurations.admissionregistration.k8s.io", - "operator-mutating-webhook-configuration", - "-o", "go-template={{ range .webhooks }}{{ .clientConfig.caBundle }}{{ end }}") - mwhOutput, err := utils.Run(cmd) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(len(mwhOutput)).To(BeNumerically(">", 10)) - } - Eventually(verifyCAInjection).Should(Succeed()) - }) - - It("should have CA injection for validating webhooks", func() { - By("checking CA injection for validating webhooks") - verifyCAInjection := func(g Gomega) { - cmd := exec.Command("kubectl", "get", - "validatingwebhookconfigurations.admissionregistration.k8s.io", - "operator-validating-webhook-configuration", - "-o", "go-template={{ range .webhooks }}{{ .clientConfig.caBundle }}{{ end }}") - vwhOutput, err := utils.Run(cmd) - 
g.Expect(err).NotTo(HaveOccurred()) - g.Expect(len(vwhOutput)).To(BeNumerically(">", 10)) - } - Eventually(verifyCAInjection).Should(Succeed()) - }) - - // +kubebuilder:scaffold:e2e-webhooks-checks - - // TODO: Customize the e2e test suite with scenarios specific to your project. - // Consider applying sample/CR(s) and check their status and/or verifying - // the reconciliation by using the metrics, i.e.: - // metricsOutput := getMetricsOutput() - // Expect(metricsOutput).To(ContainSubstring( - // fmt.Sprintf(`controller_runtime_reconcile_total{controller="%s",result="success"} 1`, - // strings.ToLower(), - // )) - }) -}) - -// serviceAccountToken returns a token for the specified service account in the given namespace. -// It uses the Kubernetes TokenRequest API to generate a token by directly sending a request -// and parsing the resulting token from the API response. -func serviceAccountToken() (string, error) { - const tokenRequestRawString = `{ - "apiVersion": "authentication.k8s.io/v1", - "kind": "TokenRequest" - }` - - // Temporary file to store the token request - secretName := fmt.Sprintf("%s-token-request", serviceAccountName) - tokenRequestFile := filepath.Join("/tmp", secretName) - err := os.WriteFile(tokenRequestFile, []byte(tokenRequestRawString), os.FileMode(0o644)) - if err != nil { - return "", err - } - - var out string - verifyTokenCreation := func(g Gomega) { - // Execute kubectl command to create the token - cmd := exec.Command("kubectl", "create", "--raw", fmt.Sprintf( - "/api/v1/namespaces/%s/serviceaccounts/%s/token", - namespace, - serviceAccountName, - ), "-f", tokenRequestFile) - - output, err := cmd.CombinedOutput() - g.Expect(err).NotTo(HaveOccurred()) - - // Parse the JSON output to extract the token - var token tokenRequest - err = json.Unmarshal(output, &token) - g.Expect(err).NotTo(HaveOccurred()) - - out = token.Status.Token - } - Eventually(verifyTokenCreation).Should(Succeed()) - - return out, err -} - -// getMetricsOutput 
retrieves and returns the logs from the curl pod used to access the metrics endpoint. -func getMetricsOutput() string { - By("getting the curl-metrics logs") - cmd := exec.Command("kubectl", "logs", "curl-metrics", "-n", namespace) - metricsOutput, err := utils.Run(cmd) - Expect(err).NotTo(HaveOccurred(), "Failed to retrieve logs from curl pod") - Expect(metricsOutput).To(ContainSubstring("< HTTP/1.1 200 OK")) - return metricsOutput -} - -// tokenRequest is a simplified representation of the Kubernetes TokenRequest API response, -// containing only the token field that we need to extract. -type tokenRequest struct { - Status struct { - Token string `json:"token"` - } `json:"status"` -} diff --git a/operator/test/utils/utils.go b/operator/test/utils/utils.go deleted file mode 100644 index 04a5141c..00000000 --- a/operator/test/utils/utils.go +++ /dev/null @@ -1,251 +0,0 @@ -/* -Copyright 2025. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package utils - -import ( - "bufio" - "bytes" - "fmt" - "os" - "os/exec" - "strings" - - . 
"github.com/onsi/ginkgo/v2" //nolint:golint,revive -) - -const ( - prometheusOperatorVersion = "v0.77.1" - prometheusOperatorURL = "https://github.com/prometheus-operator/prometheus-operator/" + - "releases/download/%s/bundle.yaml" - - certmanagerVersion = "v1.16.3" - certmanagerURLTmpl = "https://github.com/cert-manager/cert-manager/releases/download/%s/cert-manager.yaml" -) - -func warnError(err error) { - _, _ = fmt.Fprintf(GinkgoWriter, "warning: %v\n", err) -} - -// Run executes the provided command within this context -func Run(cmd *exec.Cmd) (string, error) { - dir, _ := GetProjectDir() - cmd.Dir = dir - - if err := os.Chdir(cmd.Dir); err != nil { - _, _ = fmt.Fprintf(GinkgoWriter, "chdir dir: %s\n", err) - } - - cmd.Env = append(os.Environ(), "GO111MODULE=on") - command := strings.Join(cmd.Args, " ") - _, _ = fmt.Fprintf(GinkgoWriter, "running: %s\n", command) - output, err := cmd.CombinedOutput() - if err != nil { - return string(output), fmt.Errorf("%s failed with error: (%v) %s", command, err, string(output)) - } - - return string(output), nil -} - -// InstallPrometheusOperator installs the prometheus Operator to be used to export the enabled metrics. -func InstallPrometheusOperator() error { - url := fmt.Sprintf(prometheusOperatorURL, prometheusOperatorVersion) - cmd := exec.Command("kubectl", "create", "-f", url) - _, err := Run(cmd) - return err -} - -// UninstallPrometheusOperator uninstalls the prometheus -func UninstallPrometheusOperator() { - url := fmt.Sprintf(prometheusOperatorURL, prometheusOperatorVersion) - cmd := exec.Command("kubectl", "delete", "-f", url) - if _, err := Run(cmd); err != nil { - warnError(err) - } -} - -// IsPrometheusCRDsInstalled checks if any Prometheus CRDs are installed -// by verifying the existence of key CRDs related to Prometheus. 
-func IsPrometheusCRDsInstalled() bool { - // List of common Prometheus CRDs - prometheusCRDs := []string{ - "prometheuses.monitoring.coreos.com", - "prometheusrules.monitoring.coreos.com", - "prometheusagents.monitoring.coreos.com", - } - - cmd := exec.Command("kubectl", "get", "crds", "-o", "custom-columns=NAME:.metadata.name") - output, err := Run(cmd) - if err != nil { - return false - } - crdList := GetNonEmptyLines(output) - for _, crd := range prometheusCRDs { - for _, line := range crdList { - if strings.Contains(line, crd) { - return true - } - } - } - - return false -} - -// UninstallCertManager uninstalls the cert manager -func UninstallCertManager() { - url := fmt.Sprintf(certmanagerURLTmpl, certmanagerVersion) - cmd := exec.Command("kubectl", "delete", "-f", url) - if _, err := Run(cmd); err != nil { - warnError(err) - } -} - -// InstallCertManager installs the cert manager bundle. -func InstallCertManager() error { - url := fmt.Sprintf(certmanagerURLTmpl, certmanagerVersion) - cmd := exec.Command("kubectl", "apply", "-f", url) - if _, err := Run(cmd); err != nil { - return err - } - // Wait for cert-manager-webhook to be ready, which can take time if cert-manager - // was re-installed after uninstalling on a cluster. - cmd = exec.Command("kubectl", "wait", "deployment.apps/cert-manager-webhook", - "--for", "condition=Available", - "--namespace", "cert-manager", - "--timeout", "5m", - ) - - _, err := Run(cmd) - return err -} - -// IsCertManagerCRDsInstalled checks if any Cert Manager CRDs are installed -// by verifying the existence of key CRDs related to Cert Manager. 
-func IsCertManagerCRDsInstalled() bool { - // List of common Cert Manager CRDs - certManagerCRDs := []string{ - "certificates.cert-manager.io", - "issuers.cert-manager.io", - "clusterissuers.cert-manager.io", - "certificaterequests.cert-manager.io", - "orders.acme.cert-manager.io", - "challenges.acme.cert-manager.io", - } - - // Execute the kubectl command to get all CRDs - cmd := exec.Command("kubectl", "get", "crds") - output, err := Run(cmd) - if err != nil { - return false - } - - // Check if any of the Cert Manager CRDs are present - crdList := GetNonEmptyLines(output) - for _, crd := range certManagerCRDs { - for _, line := range crdList { - if strings.Contains(line, crd) { - return true - } - } - } - - return false -} - -// LoadImageToKindClusterWithName loads a local docker image to the kind cluster -func LoadImageToKindClusterWithName(name string) error { - cluster := "kind" - if v, ok := os.LookupEnv("KIND_CLUSTER"); ok { - cluster = v - } - kindOptions := []string{"load", "docker-image", name, "--name", cluster} - cmd := exec.Command("kind", kindOptions...) - _, err := Run(cmd) - return err -} - -// GetNonEmptyLines converts given command output string into individual objects -// according to line breakers, and ignores the empty elements in it. -func GetNonEmptyLines(output string) []string { - var res []string - elements := strings.Split(output, "\n") - for _, element := range elements { - if element != "" { - res = append(res, element) - } - } - - return res -} - -// GetProjectDir will return the directory where the project is -func GetProjectDir() (string, error) { - wd, err := os.Getwd() - if err != nil { - return wd, err - } - wd = strings.Replace(wd, "/test/e2e", "", -1) - return wd, nil -} - -// UncommentCode searches for target in the file and remove the comment prefix -// of the target content. The target content may span multiple lines. 
-func UncommentCode(filename, target, prefix string) error { - // false positive - // nolint:gosec - content, err := os.ReadFile(filename) - if err != nil { - return err - } - strContent := string(content) - - idx := strings.Index(strContent, target) - if idx < 0 { - return fmt.Errorf("unable to find the code %s to be uncomment", target) - } - - out := new(bytes.Buffer) - _, err = out.Write(content[:idx]) - if err != nil { - return err - } - - scanner := bufio.NewScanner(bytes.NewBufferString(target)) - if !scanner.Scan() { - return nil - } - for { - _, err := out.WriteString(strings.TrimPrefix(scanner.Text(), prefix)) - if err != nil { - return err - } - // Avoid writing a newline in case the previous line was the last in target. - if !scanner.Scan() { - break - } - if _, err := out.WriteString("\n"); err != nil { - return err - } - } - - _, err = out.Write(content[idx+len(target):]) - if err != nil { - return err - } - // false positive - // nolint:gosec - return os.WriteFile(filename, out.Bytes(), 0644) -} diff --git a/operator/utils/util.go b/operator/utils/util.go deleted file mode 100644 index 67fd1658..00000000 --- a/operator/utils/util.go +++ /dev/null @@ -1,16 +0,0 @@ -package utils - -import ( - "github.com/go-logr/logr" - "k8s.io/apimachinery/pkg/api/errors" - ctrl "sigs.k8s.io/controller-runtime" -) - -// HandleReconcileError handles errors in reconcile loops, logging conflicts as info and returning nil error for them. -func HandleReconcileError(log logr.Logger, err error, conflictMsg string) (ctrl.Result, error) { - if errors.IsConflict(err) { - log.V(1).Info(conflictMsg, "error", err) - return ctrl.Result{}, nil - } - return ctrl.Result{}, err -} diff --git a/perf/perf.go b/perf/perf.go deleted file mode 100644 index 8c8c27ef..00000000 --- a/perf/perf.go +++ /dev/null @@ -1,272 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package perf - -import ( - "context" - "encoding/json" - "fmt" - "io" - "log/slog" - "math/rand" - "os" - "strconv" - "sync/atomic" - "time" - - "github.com/bmizerany/perks/quantile" - adminclient "github.com/functionstream/function-stream/admin/client" - "github.com/functionstream/function-stream/admin/utils" - "github.com/functionstream/function-stream/common" - "github.com/functionstream/function-stream/fs/contube" - "golang.org/x/time/rate" -) - -type TubeBuilder func(ctx context.Context) (contube.TubeFactory, error) - -type Config struct { - PulsarURL string - RequestRate float64 - Func *adminclient.ModelFunction - QueueBuilder TubeBuilder -} - -type Perf interface { - Run(context.Context) -} - -type perf struct { - config *Config - input chan<- contube.Record - output <-chan contube.Record - tubeBuilder TubeBuilder -} - -func New(config *Config) Perf { - p := &perf{ - config: config, - } - if config.QueueBuilder == nil { - p.tubeBuilder = func(ctx context.Context) (contube.TubeFactory, error) { - return contube.NewPulsarEventQueueFactory(ctx, (&contube.PulsarTubeFactoryConfig{ - PulsarURL: config.PulsarURL, - }).ToConfigMap()) - } - } else { - p.tubeBuilder = config.QueueBuilder - } - return p -} - -type Person struct { - Name string `json:"name"` - Money int `json:"money"` - Expected int `json:"expected"` -} - -func (p *perf) Run(ctx context.Context) { - slog.Info( - "Starting Function stream 
perf client", - slog.Any("config", p.config), - ) - - name := "perf-" + strconv.Itoa(rand.Int()) - var f adminclient.ModelFunction - if p.config.Func != nil { - f = *p.config.Func - } else { - f = adminclient.ModelFunction{ - Runtime: adminclient.ModelRuntimeConfig{ - Config: map[string]interface{}{ - common.RuntimeArchiveConfigKey: "./bin/example_basic.wasm", - }, - }, - Source: utils.MakeMemorySourceTubeConfig("test-input-" + strconv.Itoa(rand.Int())), - Sink: *utils.MakeMemorySinkTubeConfig("test-output-" + strconv.Itoa(rand.Int())), - } - } - f.Name = name - - queueFactory, err := p.tubeBuilder(ctx) - if err != nil { - slog.Error( - "Failed to create Record Queue Factory", - slog.Any("error", err), - ) - os.Exit(1) - } - - inputTopic, err := utils.GetInputTopics(&f) - if err != nil { - slog.Error( - "Failed to get input topics", - slog.Any("error", err), - ) - os.Exit(1) - - } - p.input, err = queueFactory.NewSinkTube(ctx, (&contube.SinkQueueConfig{ - Topic: inputTopic[0], - }).ToConfigMap()) - if err != nil { - slog.Error( - "Failed to create Sink Perf Channel", - slog.Any("error", err), - ) - os.Exit(1) - } - - outputTopic, err := utils.GetOutputTopic(&f) - if err != nil { - slog.Error( - "Failed to get output topic", - slog.Any("error", err), - ) - os.Exit(1) - } - p.output, err = queueFactory.NewSourceTube(ctx, (&contube.SourceQueueConfig{ - Topics: []string{outputTopic}, - SubName: "perf", - }).ToConfigMap()) - if err != nil { - slog.Error( - "Failed to create Sources Perf Channel", - slog.Any("error", err), - ) - os.Exit(1) - } - - cfg := adminclient.NewConfiguration() - cli := adminclient.NewAPIClient(cfg) - - res, err := cli.FunctionAPI.CreateFunction(context.Background()).Body(f).Execute() - if err != nil { - body, _ := io.ReadAll(res.Body) - slog.Error( - "Failed to create Create Function", - slog.Any("error", err), - slog.Any("body", body), - ) - os.Exit(1) - } - - defer func() { - res, err := cli.FunctionAPI.DeleteFunction(context.Background(), 
name).Execute() - if err != nil { - slog.Error( - "Failed to delete Function", - slog.Any("error", err), - ) - os.Exit(1) - } - if res.StatusCode != 200 { - slog.Error( - "Failed to delete Function", - slog.Any("statusCode", res.StatusCode), - ) - os.Exit(1) - } - }() - - latencyCh := make(chan int64) - var failureCount int64 - go p.generateTraffic(ctx, latencyCh, &failureCount) - - reportInterval := time.Second - ticker := time.NewTicker(reportInterval) - defer ticker.Stop() - - q := quantile.NewTargeted(0.50, 0.95, 0.99, 0.999, 1.0) - ops := 0 - for { - select { - case <-ticker.C: - slog.Info(fmt.Sprintf(`Stats - Total ops: %6.1f ops/s - Failed ops: %6.1f ops/s - Latency ms: 50%% %5.1f - 95%% %5.1f - 99%% %5.1f - 99.9%% %5.1f - max %6.1f`, - float64(ops)/float64(reportInterval/time.Second), - float64(failureCount)/float64(reportInterval/time.Second), - q.Query(0.5), - q.Query(0.95), - q.Query(0.99), - q.Query(0.999), - q.Query(1.0), - )) - q.Reset() - ops = 0 - atomic.StoreInt64(&failureCount, 0) - case l := <-latencyCh: - ops++ - q.Insert(float64(l) / 1000.0) // Convert to millis - case <-ctx.Done(): - slog.InfoContext(ctx, "Shutting down perf client") - return - } - } - -} - -func (p *perf) generateTraffic(ctx context.Context, latencyCh chan int64, failureCount *int64) { - limiter := rate.NewLimiter(rate.Limit(p.config.RequestRate), int(p.config.RequestRate)) - - count := 0 - for { - if err := limiter.Wait(ctx); err != nil { - return - } - c := count - count++ - person := Person{Name: "rbt", Money: c, Expected: c + 1} - jsonBytes, err := json.Marshal(person) - if err != nil { - slog.Error( - "Failed to marshal Person", - slog.Any("error", err), - ) - os.Exit(1) - } - start := time.Now() - if !common.SendToChannel(ctx, p.input, contube.NewRecordImpl(jsonBytes, func() {})) { - return - } - go func() { - e, ok := common.ReceiveFromChannel(ctx, p.output) - if !ok { - return - } - latencyCh <- time.Since(start).Microseconds() - payload := e.GetPayload() - e.Commit() 
- var out Person - err = json.Unmarshal(payload, &out) - if err != nil { - slog.Error( - "Failed to unmarshal Person", - slog.Any("error", err), - slog.Any("payload", payload), - ) - os.Exit(1) - } - if out.Money != out.Expected { - slog.Error( - "Unexpected value for money", - slog.Any("money", out.Money), - ) - atomic.AddInt64(failureCount, 1) - } - }() - } -} diff --git a/protocol/Cargo.toml b/protocol/Cargo.toml new file mode 100644 index 00000000..fde9de52 --- /dev/null +++ b/protocol/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "protocol" +version = "0.1.0" +edition = "2024" +description = "Protocol Buffers protocol definitions for function stream" +license = "MIT OR Apache-2.0" +repository = "https://github.com/your-username/rust-function-stream" + +[dependencies] +prost = "0.13" +tonic = { version = "0.12", features = ["default"] } +log = "0.4" + +[build-dependencies] +tonic-build = "0.12" +env_logger = "0.10" +log = "0.4" diff --git a/protocol/build.rs b/protocol/build.rs new file mode 100644 index 00000000..17e77d30 --- /dev/null +++ b/protocol/build.rs @@ -0,0 +1,63 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::path::Path; + +fn main() -> Result<(), Box> { + // Initialize logger for build script + env_logger::init(); + + // Create output directories in the protocol package directory + // Use CARGO_MANIFEST_DIR to get the package root directory + let manifest_dir = std::env::var("CARGO_MANIFEST_DIR")?; + let out_dir = Path::new(&manifest_dir).join("generated"); + let proto_file = Path::new(&manifest_dir).join("proto/function_stream.proto"); + + // Note: Cargo doesn't directly support cleaning custom directories via cargo clean. + // The generated directory will be automatically regenerated on each build if needed. + // To clean it manually, use: ./clean.sh or make clean or rm -rf protocol/generated + + log::info!("Generated code will be placed in: {}", out_dir.display()); + log::info!("Proto file: {}", proto_file.display()); + + // Create output directories + let cli_dir = out_dir.join("cli"); + let service_dir = out_dir.join("service"); + + std::fs::create_dir_all(&cli_dir)?; + std::fs::create_dir_all(&service_dir)?; + log::info!( + "Created output directories: {} and {}", + cli_dir.display(), + service_dir.display() + ); + + // Generate code for CLI - only client code needed + tonic_build::configure() + .out_dir(&cli_dir) + .build_client(true) // Enable client code generation + .build_server(false) // Disable server code generation + .compile_protos(&["proto/function_stream.proto"], &["proto"])?; + + // Generate code for Service - only server code needed + tonic_build::configure() + .out_dir(&service_dir) + .build_client(false) // Disable client code generation + .build_server(true) // Enable server code generation + .compile_protos(&["proto/function_stream.proto"], &["proto"])?; + + log::info!("Protocol Buffers code generated successfully"); + println!("cargo:rustc-env=PROTO_GEN_DIR={}", out_dir.display()); + println!("cargo:rerun-if-changed={}", proto_file.display()); + + Ok(()) +} diff --git a/protocol/proto/function_stream.proto 
b/protocol/proto/function_stream.proto new file mode 100644 index 00000000..08dc7591 --- /dev/null +++ b/protocol/proto/function_stream.proto @@ -0,0 +1,136 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Function Stream Protocol Buffers Definition +// This file defines the gRPC interfaces for Function Stream service + +syntax = "proto3"; + +package function_stream; + +// Status codes for API responses +enum StatusCode { + // Unknown status (default) + STATUS_UNKNOWN = 0; + + // Success codes (2xx) + OK = 200; + CREATED = 201; + ACCEPTED = 202; + NO_CONTENT = 204; + + // Client error codes (4xx) + BAD_REQUEST = 400; + UNAUTHORIZED = 401; + FORBIDDEN = 403; + NOT_FOUND = 404; + METHOD_NOT_ALLOWED = 405; + CONFLICT = 409; + UNPROCESSABLE_ENTITY = 422; + TOO_MANY_REQUESTS = 429; + + // Server error codes (5xx) + INTERNAL_SERVER_ERROR = 500; + NOT_IMPLEMENTED = 501; + BAD_GATEWAY = 502; + SERVICE_UNAVAILABLE = 503; + GATEWAY_TIMEOUT = 504; +} + +// Unified response structure +message Response { + StatusCode status_code = 1; + string message = 2; + optional bytes data = 3; +} + +// SQL execution request +message SqlRequest { + string sql = 1; +} + +// Create function request +message CreateFunctionRequest { + bytes config_bytes = 1; + bytes function_bytes = 2; +} + +// Python module information +message PythonModule { + string module_name = 1; + bytes module_bytes = 2; +} + +// Create Python function request (for creating Python function dynamically) 
+message CreatePythonFunctionRequest { + string class_name = 1; + repeated PythonModule modules = 2; + string config_content = 3; +} + +// Function information (for ShowFunctions response) +message FunctionInfo { + string name = 1; + string task_type = 2; + string status = 3; +} + +// Drop function request +message DropFunctionRequest { + string function_name = 1; +} + +// Show functions request (optional filter, empty for list all) +message ShowFunctionsRequest { + optional string filter = 1; +} + +// Show functions response (status_code, message, and list of FunctionInfo) +message ShowFunctionsResponse { + StatusCode status_code = 1; + string message = 2; + repeated FunctionInfo functions = 3; +} + +// Start function request +message StartFunctionRequest { + string function_name = 1; +} + +// Stop function request +message StopFunctionRequest { + string function_name = 1; +} + +// Function Stream service +service FunctionStreamService { + // SQL execution + rpc ExecuteSql(SqlRequest) returns (Response); + + // Create function + rpc CreateFunction(CreateFunctionRequest) returns (Response); + + // Create Python function dynamically + rpc CreatePythonFunction(CreatePythonFunctionRequest) returns (Response); + + // Drop function + rpc DropFunction(DropFunctionRequest) returns (Response); + + // Show functions (returns list of FunctionInfo) + rpc ShowFunctions(ShowFunctionsRequest) returns (ShowFunctionsResponse); + + // Start function + rpc StartFunction(StartFunctionRequest) returns (Response); + + // Stop function + rpc StopFunction(StopFunctionRequest) returns (Response); +} diff --git a/protocol/src/lib.rs b/protocol/src/lib.rs new file mode 100644 index 00000000..b0c6da06 --- /dev/null +++ b/protocol/src/lib.rs @@ -0,0 +1,34 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Protocol Buffers protocol definitions for function stream +// This module exports the generated Protocol Buffers code + +// CLI module - exports client code +#[path = "../generated/cli/function_stream.rs"] +pub mod cli; + +// Service module - exports server code +#[path = "../generated/service/function_stream.rs"] +pub mod service; + +// Re-export commonly used types from both modules +// Data structures are the same in both, so we can re-export from either +pub use cli::function_stream_service_client; + +// Re-export client-specific types +pub use cli::function_stream_service_client::FunctionStreamServiceClient; + +// Re-export server-specific types +pub use service::function_stream_service_server::{ + FunctionStreamService, FunctionStreamServiceServer, +}; diff --git a/python/functionstream-api/Makefile b/python/functionstream-api/Makefile new file mode 100644 index 00000000..309332fd --- /dev/null +++ b/python/functionstream-api/Makefile @@ -0,0 +1,176 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 

# ==============================================================================
# Function Stream API - Industrial Build System
# ==============================================================================

# --- 1. Configuration & Variables ---

# Project Metadata
PROJECT_NAME := functionstream-api
PACKAGE_NAME := fs_api

# Shell setup
SHELL := /bin/bash
.SHELLFLAGS := -eu -o pipefail -c

# Directory paths
mkfile_path := $(abspath $(lastword $(MAKEFILE_LIST)))
current_dir := $(dir $(mkfile_path))

# [Standard Monorepo Roots]
# SCRIPT_DIR: Current directory (.../python/functionstream-api)
SCRIPT_DIR := $(current_dir)
# MONOREPO_ROOT: Grandparent directory (.../function-stream)
MONOREPO_ROOT := $(abspath $(current_dir)/../..)

# Output paths
DIST_DIR := $(SCRIPT_DIR)/dist
BUILD_DIR := $(SCRIPT_DIR)/build

# Virtual Environment (Shared)
VENV_DIR := $(MONOREPO_ROOT)/.venv
VENV_BIN := $(VENV_DIR)/bin
PYTHON := $(VENV_BIN)/python
PIP := $(VENV_BIN)/pip
TWINE := $(VENV_BIN)/twine
PYTEST := $(VENV_BIN)/pytest

# --- 2. Colors & Logging ---
# Disable colors when stdin is not a terminal (e.g. CI).
ifneq ($(shell test -t 0; echo $$?),0)
    color_reset :=
    color_green :=
    color_blue  :=
    color_red   :=
    color_yellow:=
else
    color_reset := $(shell tput sgr0)
    color_green := $(shell tput setaf 2)
    color_blue  := $(shell tput setaf 4)
    color_red   := $(shell tput setaf 1)
    color_yellow:= $(shell tput setaf 3)
endif

# NOTE: each log_* macro expands to a full recipe line that already starts
# with '@'; callers must NOT prefix the $(call ...) with another '@'.
define log_info
	@echo "$(color_blue)[INFO]$(color_reset) $1"
endef

define log_success
	@echo "$(color_green)[SUCCESS]$(color_reset) $1"
endef

define log_warn
	@echo "$(color_yellow)[WARN]$(color_reset) $1"
endef

# --- 3. Targets ---

.PHONY: all help venv install-deps install install-dev build clean test publish info uninstall

# Default target
all: build

# Ensure venv exists (Check root)
$(PYTHON):
	$(call log_warn, Virtual environment not found at $(VENV_DIR))
	$(call log_info, Please run 'python3 -m venv .venv' in the Monorepo root first.)
	@exit 1

venv: $(PYTHON)

# [FIX] Explicitly install build tools (Idempotent)
install-deps: venv
	$(call log_info, Installing build dependencies...)
	@$(PIP) install --upgrade pip setuptools wheel build twine pytest
	$(call log_success, Build dependencies installed.)

# Install package (Release simulation)
install: install-deps
	$(call log_info, Installing $(PROJECT_NAME)...)
	@$(PIP) install .
	$(call log_success, Installation complete.)

# Install in editable mode (Development)
install-dev: install-deps
	$(call log_info, Installing $(PROJECT_NAME) in editable mode...)
	@$(PIP) install -e ".[dev]" || $(PIP) install -e .
	$(call log_success, Editable installation complete.)

# Build Source and Wheel distribution
# [FIX] Added install-deps as a prerequisite
build: install-deps clean
	$(call log_info, Building distribution artifacts...)
	@$(PYTHON) -m build
	$(call log_info, Checking artifacts with Twine...)
	@$(TWINE) check dist/*
	$(call log_success, Build complete. Artifacts in 'dist/'.)

# Publish to PyPI
publish: build
	$(call log_warn, You are about to upload to PyPI.)
	@$(TWINE) upload dist/*

# Run tests
test: install-deps
	$(call log_info, Running tests...)
	@if [ -d "tests" ]; then \
		$(PYTEST) tests/ -v; \
	else \
		echo "$(color_yellow)[WARN]$(color_reset) No 'tests' directory found. Skipping."; \
	fi

# Deep clean
clean:
	$(call log_info, Cleaning build artifacts...)
	@rm -rf $(DIST_DIR) $(BUILD_DIR)
	@rm -rf *.egg-info .eggs
	@find . -type d -name "__pycache__" -exec rm -rf {} +
	@find . -type d -name "*.egg-info" -exec rm -rf {} +
	@find . -type d -name ".pytest_cache" -exec rm -rf {} +
	@find . -type f -name ".coverage" -delete
	@find . -type f -name "*.pyc" -delete
	@find . -type f -name "*.pyo" -delete
	$(call log_success, Clean complete.)

# Uninstall
uninstall:
	$(call log_info, Uninstalling $(PROJECT_NAME)...)
	@$(PIP) uninstall -y $(PROJECT_NAME) || true
	$(call log_success, Uninstallation complete.)

# Info
info: venv
	@echo "$(color_blue)Project Info:$(color_reset)"
	@echo "  Name:    $(PROJECT_NAME)"
	@echo "  Package: $(PACKAGE_NAME)"
	@echo "  Venv:    $(VENV_DIR)"
	@echo "  Python:  $(shell $(PYTHON) --version)"
	@echo ""
	@echo "$(color_blue)Installed Packages:$(color_reset)"
	@$(PIP) list | grep -E "build|wheel|twine|$(PROJECT_NAME)"

# Help
help:
	@echo "$(color_blue)$(PROJECT_NAME) Build System$(color_reset)"
	@echo "Usage: make <target>"
	@echo ""
	@echo "Targets:"
	@awk '/^[a-zA-Z\-\_0-9]+:/ { \
		helpMessage = match(lastLine, /^# (.*)/); \
		if (helpMessage) { \
			helpCommand = substr($$1, 1, index($$1, ":")-1); \
			helpMessage = substr(lastLine, RSTART + 2, RLENGTH); \
			printf "  $(color_green)%-15s$(color_reset) %s\n", helpCommand, helpMessage; \
		} \
	} \
	{ lastLine = $$0 }' $(MAKEFILE_LIST)
#!/usr/bin/env python3
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Build script for Function Stream API python Package.

This script handles the lifecycle of building the python distribution artifacts
(Wheel and Source Distribution) using modern python packaging standards.

Note: This file is named build_package.py (not build.py) on purpose. A local
build.py would shadow the PyPA "build" module when running "python -m build",
causing infinite recursion. Use "python build_package.py" for this script, or
"python -m build" / "make build" for the standard build.
"""

import sys
import shutil
import logging
import subprocess
import importlib.util
from pathlib import Path
from typing import List

# --- Configuration ---
SCRIPT_DIR = Path(__file__).parent.absolute()
DIST_DIR = SCRIPT_DIR / "dist"
BUILD_DIR = SCRIPT_DIR / "build"
# Glob used by clean() to find setuptools metadata directories.
EGG_INFO_PATTERN = "*.egg-info"

# Configure Logging
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - [%(levelname)s] - %(message)s",
    datefmt="%H:%M:%S"
)
logger = logging.getLogger("fs_builder")


class BuildError(Exception):
    """Custom exception for build failures."""


class Builder:
    """Encapsulates the build logic.

    Flow (see run()): check_environment -> clean -> build -> verify.
    """

    def __init__(self, work_dir: Path):
        # work_dir: project root containing pyproject.toml.
        self.work_dir = work_dir
        self.dist_dir = work_dir / "dist"
        # Use the interpreter running this script so the build happens in the
        # same (virtual) environment.
        self.python_exec = sys.executable

    def check_environment(self) -> None:
        """
        Verifies that the build environment has necessary tools.
        Raises BuildError if tools are missing.
        """
        logger.info("Checking build environment...")

        required_modules = ["setuptools", "wheel", "build"]
        missing = []

        for mod in required_modules:
            # find_spec() detects importability without importing the module.
            if importlib.util.find_spec(mod) is None:
                missing.append(mod)

        if missing:
            logger.error(f"Missing build dependencies: {', '.join(missing)}")
            logger.info("Please run: pip install --upgrade build setuptools wheel")
            raise BuildError("Environment check failed.")

        logger.info("✓ Environment check passed.")

    def clean(self) -> None:
        """
        Cleans up previous build artifacts.

        Removal is best-effort: failures are logged as warnings, never raised.
        """
        logger.info("Cleaning build artifacts...")

        paths_to_clean: List[Path] = [
            self.work_dir / "build",
            self.work_dir / "dist",
        ]

        # Add egg-info directories
        paths_to_clean.extend(self.work_dir.glob(EGG_INFO_PATTERN))

        for path in paths_to_clean:
            if not path.exists():
                continue

            try:
                if path.is_dir():
                    shutil.rmtree(path)
                    logger.debug(f"Removed directory: {path}")
                else:
                    path.unlink()
                    logger.debug(f"Removed file: {path}")
            except OSError as e:
                # A locked or concurrently-removed path must not abort the build.
                logger.warning(f"Failed to remove {path}: {e}")

        logger.info("✓ Clean completed.")

    def build(self) -> None:
        """
        Executes the build process using the PyPA `build` module.
        Runs from work_dir.parent with work_dir as srcdir so that a local
        build.py in work_dir cannot shadow "python -m build", avoiding
        infinite recursion.

        Raises:
            BuildError: if the underlying build command exits non-zero.
        """
        logger.info("Building python package...")

        self.dist_dir.mkdir(parents=True, exist_ok=True)

        # Run from parent directory and explicitly pass project path to avoid
        # local build.py shadowing PyPA's build
        cmd = [
            self.python_exec, "-m", "build",
            "--outdir", str(self.dist_dir),
            str(self.work_dir),
        ]
        # Cannot be work_dir, otherwise "python -m build" will load local
        # build.py causing infinite recursion
        run_cwd = self.work_dir.parent

        logger.info(f"Executing: {' '.join(cmd)}")

        try:
            # Use inherit (stdout/stderr=None) to avoid PIPE buffer full
            # causing subprocess blocking or timeout
            # Note: subprocess.run is safe here as cmd is constructed from
            # controlled build-time inputs, not user-provided data
            subprocess.run(  # noqa: S603
                cmd,
                cwd=run_cwd,
                check=True,
                stdout=None,
                stderr=None,
            )
        except subprocess.CalledProcessError as e:
            logger.error("Build process failed.")
            # When using inherit, no captured content, prompt user to check
            # terminal output above
            # (with stdout/stderr=None above, e.stdout/e.stderr are always
            # None, so the else branch is the one normally taken)
            if e.stdout or e.stderr:
                logger.error(f"STDOUT:\n{e.stdout}")
                logger.error(f"STDERR:\n{e.stderr}")
            else:
                logger.error("See terminal output above for details.")
            raise BuildError("Build command failed.") from e

        self._summarize_artifacts()

    def _summarize_artifacts(self) -> None:
        """Log the results of the build.

        Raises:
            BuildError: if dist/ is missing or empty after the build.
        """
        if not self.dist_dir.exists():
            raise BuildError("Dist directory was not created.")

        artifacts = list(self.dist_dir.glob("*"))
        if not artifacts:
            raise BuildError("No artifacts found in dist directory.")

        logger.info("✓ Build Successful! Generated artifacts:")
        for artifact in sorted(artifacts):
            size_kb = artifact.stat().st_size / 1024
            logger.info(f" - {artifact.name:<30} ({size_kb:.2f} KB)")

    def verify(self) -> None:
        """
        Performs post-build verification.

        Warns if either a wheel or an sdist is missing; raises BuildError only
        when NEITHER artifact type was produced.
        """
        logger.info("Verifying package artifacts...")

        has_wheel = any(self.dist_dir.glob("*.whl"))
        has_sdist = any(self.dist_dir.glob("*.tar.gz"))

        if not has_wheel:
            logger.warning("⚠️ No Wheel (.whl) file found.")
        if not has_sdist:
            logger.warning("⚠️ No Source Distribution (.tar.gz) found.")

        if not (has_wheel or has_sdist):
            raise BuildError("Verification failed: No valid artifacts generated.")

        logger.info("✓ Verification passed.")

    def run(self) -> None:
        """Main execution flow.

        Exit codes: 1 on BuildError or unexpected exception, 130 on Ctrl-C.
        """
        print("=" * 60)
        print(" Function Stream API - Builder")
        print("=" * 60)

        try:
            self.check_environment()
            self.clean()
            self.build()
            self.verify()

            print("\n" + "=" * 60)
            logger.info("Process completed successfully.")
            print("To install:")
            print(f" pip install {self.dist_dir}/*.whl")
            print("=" * 60)

        except BuildError as e:
            logger.error(f"Build failed: {e}")
            sys.exit(1)
        except KeyboardInterrupt:
            logger.error("Build interrupted by user.")
            sys.exit(130)
        except Exception:
            logger.exception("An unexpected error occurred:")
            sys.exit(1)


def main():
    # Entry point: build the package rooted at this script's directory.
    builder = Builder(work_dir=SCRIPT_DIR)
    builder.run()


if __name__ == "__main__":
    main()
index 00000000..a6b56128 --- /dev/null +++ b/python/functionstream-api/pyproject.toml @@ -0,0 +1,31 @@ +[build-system] +requires = ["setuptools>=61.0", "wheel"] +build-backend = "setuptools.build_meta" + +[project] +name = "functionstream-api" +version = "0.0.1" +description = "Function Stream API - Pure interface definitions" +requires-python = ">=3.7" +license = "Apache-2.0" +authors = [ + {name = "Function Stream Team"} +] +classifiers = [ + "Development Status :: 3 - Alpha", + "Intended Audience :: Developers", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", +] +dependencies = [ + "cloudpickle>=2.0.0", +] + +[tool.setuptools] +package-dir = {"" = "src"} +packages = ["fs_api", "fs_api.store"] + diff --git a/functions/example-external.yaml b/python/functionstream-api/src/fs_api/__init__.py similarity index 50% rename from functions/example-external.yaml rename to python/functionstream-api/src/fs_api/__init__.py index 732bc62b..1ecdd42d 100644 --- a/functions/example-external.yaml +++ b/python/functionstream-api/src/fs_api/__init__.py @@ -1,5 +1,3 @@ -# Copyright 2024 Function Stream Org. -# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -7,23 +5,35 @@ # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, +# distributed under the License is distributed on "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-name: external-function -namespace: function-stream -runtime: - type: "external" -sources: - - config: - inputs: - - "external-input" - subscription-name: "function-stream" - type: "memory" -sink: - config: - output: "external-output" - type: "memory" -replicas: 1 \ No newline at end of file + +__version__ = "0.0.1" + +from .context import Context +from .driver import FSProcessorDriver +from .store import ( + KvError, + KvNotFoundError, + KvIOError, + KvOtherError, + ComplexKey, + KvIterator, + KvStore, +) + +__all__ = [ + "Context", + "FSProcessorDriver", + "KvError", + "KvNotFoundError", + "KvIOError", + "KvOtherError", + "ComplexKey", + "KvIterator", + "KvStore", +] + diff --git a/python/functionstream-api/src/fs_api/context.py b/python/functionstream-api/src/fs_api/context.py new file mode 100644 index 00000000..52cbd8a5 --- /dev/null +++ b/python/functionstream-api/src/fs_api/context.py @@ -0,0 +1,47 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 

"""
fs_api.context

Context: runtime context object handed to a processor by the Function Stream
runtime. Provides output emission, named KV stores, and global configuration.
"""
import abc
from typing import Dict
from .store import KvStore


class Context(abc.ABC):
    """Context object.

    Abstract interface implemented by the runtime; processors receive an
    instance and must not construct one themselves.
    """

    @abc.abstractmethod
    def emit(self, data: bytes, channel: int = 0):
        # Emit `data` downstream on output `channel` (default 0).
        pass

    @abc.abstractmethod
    def emit_watermark(self, watermark: int, channel: int = 0):
        # Emit a watermark on `channel` -- presumably an event-time marker;
        # TODO confirm units (epoch millis?) against the runtime.
        pass

    # NOTE(review): the camelCase names below are inconsistent with the
    # snake_case siblings (emit, emit_watermark); renaming would break every
    # implementer, so they are left unchanged.
    @abc.abstractmethod
    def getOrCreateKVStore(self, name: str) -> KvStore:
        # Return the KvStore registered under `name`, creating it on first
        # use (per the method name -- confirm semantics with the runtime).
        pass

    @abc.abstractmethod
    def getConfig(self) -> Dict[str, str]:
        """
        Get global configuration Map

        Returns:
            Dict[str, str]: Configuration dictionary
        """

__all__ = ['Context']

import abc
from .context import Context


class FSProcessorDriver(abc.ABC):
    """Processor lifecycle interface implemented by user functions.

    The runtime supplies the Context on every call; exact call ordering and
    threading guarantees are defined by the runtime -- confirm against the
    runner implementation.
    """

    @abc.abstractmethod
    def init(self, ctx: Context, config: dict):
        # One-time initialization with the runtime context and function config.
        pass

    @abc.abstractmethod
    def process(self, ctx: Context, source_id: int, data: bytes):
        # Handle one payload arriving from input `source_id`.
        pass

    @abc.abstractmethod
    def process_watermark(self, ctx: Context, source_id: int, watermark: int):
        # Handle a watermark value from input `source_id`.
        pass

    @abc.abstractmethod
    def take_checkpoint(self, ctx: Context, checkpoint_id: int):
        # Persist state for checkpoint `checkpoint_id`.
        pass

    @abc.abstractmethod
    def check_heartbeat(self, ctx: Context) -> bool:
        # Liveness probe; return True when the processor is healthy.
        pass

    @abc.abstractmethod
    def close(self, ctx: Context):
        # Release resources on shutdown.
        pass

    @abc.abstractmethod
    def custom(self, payload: bytes) -> bytes:
        # Opaque request/response hook -- payload semantics are defined by
        # the protocol, not visible here; confirm with the runtime.
        pass

__all__ = ['FSProcessorDriver']
-listen_addr: ":7300" -queue: - type: "pulsar" - config: - pulsar_url: "pulsar://localhost:6650" -tube-config: - pulsar: - pulsar_url: "pulsar://localhost:6650" -runtime-config: - external: - socket-path: /tmp/fs.sock -function-store: ./functions \ No newline at end of file + +from .error import KvError, KvNotFoundError, KvIOError, KvOtherError +from .complexkey import ComplexKey +from .iterator import KvIterator +from .store import KvStore + +__all__ = [ + 'KvError', + 'KvNotFoundError', + 'KvIOError', + 'KvOtherError', + 'ComplexKey', + 'KvIterator', + 'KvStore', +] + diff --git a/.chglog/gen-chg-log.sh b/python/functionstream-api/src/fs_api/store/complexkey.py old mode 100755 new mode 100644 similarity index 67% rename from .chglog/gen-chg-log.sh rename to python/functionstream-api/src/fs_api/store/complexkey.py index 307d9d90..dc5e4afe --- a/.chglog/gen-chg-log.sh +++ b/python/functionstream-api/src/fs_api/store/complexkey.py @@ -1,6 +1,3 @@ -#!/bin/bash -# Copyright 2024 Function Stream Org. -# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -8,13 +5,20 @@ # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, +# distributed under the License is distributed on "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from dataclasses import dataclass


@dataclass
class ComplexKey:
    """Composite key for the structured KV API.

    Field semantics (key_group/namespace/user_key) are defined by the store
    implementation -- TODO confirm against the runtime's WIT definition.
    """
    key_group: bytes
    key: bytes
    namespace: bytes
    user_key: bytes


# --- fs_api/store/error.py --------------------------------------------------

class KvError(Exception):
    """Base class for all KV store errors."""


class KvNotFoundError(KvError):
    """Raised when a requested key does not exist."""


class _KvMessageError(KvError):
    """Shared base for errors carrying a human-readable message.

    Deduplicates the identical __init__ previously repeated in KvIOError and
    KvOtherError; keeps the public `.message` attribute both exposed.
    """

    def __init__(self, message: str):
        self.message = message
        super().__init__(message)


class KvIOError(_KvMessageError):
    """Raised on I/O failures in the underlying store."""


class KvOtherError(_KvMessageError):
    """Raised for store failures that fit no more specific category."""


__all__ = ['ComplexKey', 'KvError', 'KvNotFoundError', 'KvIOError', 'KvOtherError']
import abc
from typing import Optional, Tuple


class KvIterator(abc.ABC):
    """Forward-only iterator over (key, value) byte pairs from a store scan."""

    @abc.abstractmethod
    def has_next(self) -> bool:
        # Return True while more entries remain.
        pass

    @abc.abstractmethod
    def next(self) -> Optional[Tuple[bytes, bytes]]:
        # Return the next (key, value) pair; the Optional annotation implies
        # None when exhausted -- confirm whether implementations may instead
        # rely on has_next() being checked first.
        pass

__all__ = ['KvIterator']

"""
fs_api.store.store

WIT: resource kv.store
"""

import abc
from typing import Optional, List
from .complexkey import ComplexKey
from .iterator import KvIterator


class KvStore(abc.ABC):
    """Abstract key/value store (mirrors the WIT `kv.store` resource).

    Exposes two key spaces: a flat byte-key "state" API (put_state/get_state/
    delete_state/list_states) and a structured API keyed by ComplexKey.
    """

    @abc.abstractmethod
    def put_state(self, key: bytes, value: bytes):
        # Store `value` under the flat byte `key`.
        pass

    @abc.abstractmethod
    def get_state(self, key: bytes) -> Optional[bytes]:
        # Return the value for `key`; the Optional annotation implies None
        # when absent -- confirm vs. raising KvNotFoundError.
        pass

    @abc.abstractmethod
    def delete_state(self, key: bytes):
        # Remove `key` from the flat state space.
        pass

    @abc.abstractmethod
    def list_states(self, start_inclusive: bytes, end_exclusive: bytes) -> List[bytes]:
        # List entries in the half-open key range [start_inclusive,
        # end_exclusive); whether keys or values are returned is not visible
        # here -- confirm with an implementation.
        pass

    @abc.abstractmethod
    def put(self, key: ComplexKey, value: bytes):
        # Store `value` under the structured `key`.
        pass

    @abc.abstractmethod
    def get(self, key: ComplexKey) -> Optional[bytes]:
        # Return the value for the structured `key`, or None per annotation.
        pass

    @abc.abstractmethod
    def delete(self, key: ComplexKey):
        # Remove the structured `key`.
        pass

    @abc.abstractmethod
    def merge(self, key: ComplexKey, value: bytes):
        # Merge `value` into the existing entry; merge semantics are
        # implementation-defined (e.g. a RocksDB-style merge operator) --
        # TODO confirm.
        pass

    @abc.abstractmethod
    def delete_prefix(self, key: ComplexKey):
        # Remove all entries sharing `key` as a prefix.
        pass

    @abc.abstractmethod
    def list_complex(self, key_group: bytes, key: bytes, namespace: bytes,
                     start_inclusive: bytes, end_exclusive: bytes) -> List[bytes]:
        # Range listing within (key_group, key, namespace) over the user-key
        # range [start_inclusive, end_exclusive).
        pass

    @abc.abstractmethod
    def scan_complex(self, key_group: bytes, key: bytes, namespace: bytes) -> KvIterator:
        # Return an iterator over all entries under (key_group, key, namespace).
        pass

__all__ = ['KvStore']
+# See the License for the specific language governing permissions and +# limitations under the License. + +# --- 1. Configuration & Variables --- + +# Project Metadata +PROJECT_NAME := functionstream-client +PACKAGE_NAME := fs_client + +# Shell setup (Fail fast) +SHELL := /bin/bash +.SHELLFLAGS := -eu -o pipefail -c + +# Directory paths (Absolute paths for safety) +mkfile_path := $(abspath $(lastword $(MAKEFILE_LIST))) +current_dir := $(dir $(mkfile_path)) + +# [Standard Monorepo Roots] +SCRIPT_DIR := $(current_dir) +PYTHON_ROOT := $(abspath $(current_dir)/..) +MONOREPO_ROOT := $(abspath $(current_dir)/../..) + +# Output & Source Paths +SRC_DIR := $(SCRIPT_DIR)/src +DIST_DIR := $(SCRIPT_DIR)/dist +BUILD_DIR := $(SCRIPT_DIR)/build +PROTO_OUT_DIR := $(SRC_DIR)/$(PACKAGE_NAME)/_proto + +# Virtual Environment (Shared) +VENV_DIR := $(MONOREPO_ROOT)/.venv +VENV_BIN := $(VENV_DIR)/bin +PYTHON := $(VENV_BIN)/python +PIP := $(VENV_BIN)/pip +TWINE := $(VENV_BIN)/twine +BLACK := $(VENV_BIN)/black +ISORT := $(VENV_BIN)/isort +MYPY := $(VENV_BIN)/mypy + +# --- 2. Colors & Logging --- +ifneq ($(shell test -t 0; echo $$?),0) + color_reset := + color_green := + color_blue := + color_red := +else + color_reset := $(shell tput sgr0) + color_green := $(shell tput setaf 2) + color_blue := $(shell tput setaf 4) + color_red := $(shell tput setaf 1) +endif + +define log_info + @echo "$(color_blue)[INFO]$(color_reset) $1" +endef + +define log_success + @echo "$(color_green)[SUCCESS]$(color_reset) $1" +endef + +define log_warn + @echo "$(shell tput setaf 3)[WARN]$(color_reset) $1" +endef + +# --- 3. Targets --- + +.PHONY: all venv install-deps install install-dev proto build clean lint format publish info help + +# Default target +all: install-dev proto build + +# Check venv +$(PYTHON): + $(call log_warn, Virtual environment not found at $(VENV_DIR)) + $(call log_info, Please run 'python3 -m venv .venv' in the Monorepo root first.) 
+ @exit 1 + +venv: $(PYTHON) + +# Install Dependencies (Runtime + Dev + Build tools) +install-deps: venv + $(call log_info, Installing dependencies...) + @$(PIP) install --upgrade pip setuptools wheel build twine + @$(PIP) install grpcio-tools mypy-protobuf mypy black isort types-protobuf + @$(call log_success, Dependencies installed.) + +# Install package in editable mode (Development) +install-dev: install-deps + $(call log_info, Installing $(PROJECT_NAME) in editable mode...) + @$(PIP) install -e . + $(call log_success, Editable installation complete.) + +# Generate Protobuf/gRPC code +proto: venv + $(call log_info, Generating gRPC code from protocols...) + @# Ensure output directory exists and is a package + @mkdir -p $(PROTO_OUT_DIR) + @touch $(PROTO_OUT_DIR)/__init__.py + @# Delegate to the python script, but ensure it runs in the venv + @$(PYTHON) scripts/codegen.py + $(call log_success, Proto code generated.) + +# Code Formatting (Auto-fix) +format: venv + $(call log_info, Formatting code with Black and isort...) + @$(ISORT) $(SRC_DIR) + @$(BLACK) $(SRC_DIR) + $(call log_success, Formatting complete.) + +# Code Linting (Checks only) +lint: venv + $(call log_info, Linting code...) + @$(ISORT) --check-only $(SRC_DIR) + @$(BLACK) --check $(SRC_DIR) + @$(MYPY) $(SRC_DIR) --ignore-missing-imports || true + $(call log_success, Lint checks passed.) + +# Build Distribution (Wheel & Sdist) +build: venv clean proto + $(call log_info, Building distribution artifacts...) + @$(PYTHON) -m build + $(call log_info, Checking artifacts with Twine...) + @$(TWINE) check dist/* + $(call log_success, Build complete. Artifacts in 'dist/'.) + +# Deep Clean +clean: + $(call log_info, Cleaning build artifacts...) + @rm -rf $(DIST_DIR) $(BUILD_DIR) + @rm -rf *.egg-info .eggs + @find . -type d -name "__pycache__" -exec rm -rf {} + + @find . -type d -name "*.egg-info" -exec rm -rf {} + + @find . 
-type d -name ".mypy_cache" -exec rm -rf {} + + @# Clean generated proto files (be careful not to delete __init__.py if manually maintained) + @rm -f $(PROTO_OUT_DIR)/*_pb2.py + @rm -f $(PROTO_OUT_DIR)/*_pb2.pyi + @rm -f $(PROTO_OUT_DIR)/*_pb2_grpc.py + @rm -f $(PROTO_OUT_DIR)/*_pb2_grpc.pyi + $(call log_success, Clean completed.) + +# Publish to PyPI +publish: build + $(call log_warn, You are about to upload to PyPI.) + @$(TWINE) upload dist/* + +# Project Info +info: venv + @echo "$(color_blue)Project Info:$(color_reset)" + @echo " Project: $(PROJECT_NAME)" + @echo " Package: $(PACKAGE_NAME)" + @echo " Venv: $(VENV_DIR)" + @echo " Python: $(shell $(PYTHON) --version)" + +# Help +help: + @echo "$(color_blue)$(PROJECT_NAME) Build System$(color_reset)" + @echo "Usage: make " + @echo "" + @echo "Targets:" + @awk '/^[a-zA-Z\-\_0-9]+:/ { \ + helpMessage = match(lastLine, /^# (.*)/); \ + if (helpMessage) { \ + helpCommand = substr($$1, 0, index($$1, ":")-1); \ + helpMessage = substr(lastLine, RSTART + 2, RLENGTH); \ + printf " $(color_green)%-15s$(color_reset) %s\n", helpCommand, helpMessage; \ + } \ + } \ + { lastLine = $$0 }' $(MAKEFILE_LIST) \ No newline at end of file diff --git a/python/functionstream-client/pyproject.toml b/python/functionstream-client/pyproject.toml new file mode 100644 index 00000000..434630c6 --- /dev/null +++ b/python/functionstream-client/pyproject.toml @@ -0,0 +1,39 @@ +[build-system] +requires = ["setuptools>=61.0", "wheel"] +build-backend = "setuptools.build_meta" + +[project] +name = "functionstream-client" +version = "0.0.1" +description = "Function Stream Python Client - gRPC client library" +requires-python = ">=3.7" +license = "Apache-2.0" +authors = [ + {name = "Function Stream Team"} +] +classifiers = [ + "Development Status :: 3 - Alpha", + "Intended Audience :: Developers", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: 
Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", +] +dependencies = [ + "functionstream-api>=0.0.1", + "grpcio>=1.50.0", + "protobuf>=4.21.0", + "pyyaml>=6.0", +] + +[project.optional-dependencies] +dev = [ + "grpcio-tools>=1.50.0", + "mypy-protobuf>=3.0.0", +] + +[tool.setuptools] +package-dir = {"" = "src"} +packages = ["fs_client", "fs_client._proto"] diff --git a/python/functionstream-client/scripts/codegen.py b/python/functionstream-client/scripts/codegen.py new file mode 100755 index 00000000..893e4fc9 --- /dev/null +++ b/python/functionstream-client/scripts/codegen.py @@ -0,0 +1,226 @@ +#!/usr/bin/env python3 +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Industrial-grade Protobuf/gRPC Code Generator for python. 
# Logging setup for the generator script: level-tagged, timestamped output.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - [%(levelname)s] - %(message)s",
    datefmt="%H:%M:%S",
)
logger = logging.getLogger(__name__)


class CodeGenerator:
    """Runs grpc_tools.protoc over a set of .proto files and post-processes
    the generated Python so it can be consumed as a relative package.

    Pipeline: validate paths -> ensure the output package exists -> build
    the protoc argument list (optionally with mypy stub generation) ->
    invoke protoc -> rewrite absolute pb2 imports to package-relative form.
    """

    def __init__(self, proto_root: Path, output_dir: Path, proto_files: List[str]):
        self.proto_root = proto_root.resolve()
        self.output_dir = output_dir.resolve()
        self.proto_files = proto_files
        self._check_paths()

    def _check_paths(self):
        """Fail fast when the proto root or any listed proto file is missing."""
        if not self.proto_root.exists():
            raise FileNotFoundError(
                f"Proto root directory not found: {self.proto_root}"
            )

        for name in self.proto_files:
            full_path = self.proto_root / name
            if full_path.exists():
                continue
            raise FileNotFoundError(f"Proto file not found: {full_path}")

    def _prepare_output_dir(self):
        """Create the output directory and an __init__.py marker if absent."""
        if not self.output_dir.exists():
            self.output_dir.mkdir(parents=True, exist_ok=True)
            logger.info(f"Created output directory: {self.output_dir}")

        marker = self.output_dir / "__init__.py"
        if not marker.exists():
            marker.write_text(
                "# Generated by scripts/codegen.py\n"
                "# This package contains generated protobuf code.\n"
                "# Do not edit these files manually.\n",
                encoding="utf-8",
            )
            logger.info(f"Created package marker: {marker}")

    def _get_protoc_args(self) -> List[str]:
        """Assemble the full protoc argv (program name at index 0)."""
        args = [
            "grpc_tools.protoc",
            f"-I{self.proto_root}",
            f"--python_out={self.output_dir}",
            f"--grpc_python_out={self.output_dir}",
        ]

        mypy_plugin = self._resolve_protoc_gen_mypy()
        if mypy_plugin is None:
            logger.warning(
                "mypy-protobuf not found. Skipping .pyi generation."
            )
        else:
            args.append(f"--plugin=protoc-gen-mypy={mypy_plugin}")
            args.extend(
                [
                    f"--mypy_out={self.output_dir}",
                    f"--mypy_grpc_out={self.output_dir}",
                ]
            )
            logger.info("Enabled mypy-protobuf type generation.")

        args.extend(str(self.proto_root / name) for name in self.proto_files)
        return args

    @staticmethod
    def _resolve_protoc_gen_mypy() -> Optional[str]:
        """Locate the protoc-gen-mypy plugin executable, or None.

        protoc discovers plugins via PATH, but when invoked from make/CI
        the venv's bin directory may not be on PATH. Check next to
        sys.executable first, then fall back to a PATH lookup.
        """
        venv_bin = Path(sys.executable).resolve().parent
        for exe_name in ("protoc-gen-mypy", "protoc-gen-mypy.exe"):
            plugin = venv_bin / exe_name
            if plugin.exists() and (plugin.is_file() or plugin.is_symlink()):
                return str(plugin)

        # shutil.which returns None when the plugin is not on PATH either.
        return shutil.which("protoc-gen-mypy")

    def _fix_imports(self):
        """Rewrite 'import xxx_pb2' to 'from . import xxx_pb2' in grpc stubs.

        The generated *_pb2_grpc.py files use absolute imports, which break
        when the output directory is consumed as a package.
        """
        logger.info("Scanning for imports to fix...")

        pattern = re.compile(r"^import (\w+_pb2)(.*)$", re.MULTILINE)
        touched = 0

        for stub in self.output_dir.glob("*_pb2_grpc.py"):
            original = stub.read_text(encoding="utf-8")
            rewritten, hits = pattern.subn(r"from . import \1\2", original)
            if hits == 0:
                continue
            stub.write_text(rewritten, encoding="utf-8")
            logger.debug(f"Fixed {hits} imports in {stub.name}")
            touched += 1

        if touched > 0:
            logger.info(f"Successfully fixed imports in {touched} files.")
        else:
            logger.info("No files needed import fixes.")

    def run(self):
        """Execute the full generation pipeline; exits the process on failure."""
        try:
            self._prepare_output_dir()

            args = self._get_protoc_args()
            logger.info(f"Running protoc for: {self.proto_files}")

            # protoc.main expects an argv whose first element is ignored
            # as a program name; keep the original invocation shape.
            rc = protoc.main([""] + args[1:])
            if rc != 0:
                logger.error(f"Protoc failed with exit code {rc}")
                sys.exit(rc)

            logger.info("Protoc compilation successful.")
            self._fix_imports()
            logger.info(f"Code generation complete. Output: {self.output_dir}")

        except Exception:
            logger.exception(
                "An unexpected error occurred during code generation."
            )
            sys.exit(1)
+ ) + parser.add_argument( + "--proto-root", + type=Path, + default=default_proto_root, + help="Root directory containing .proto files", + ) + parser.add_argument( + "--out-dir", + type=Path, + default=default_out_dir, + help="Output directory for generated python code", + ) + parser.add_argument( + "files", + nargs="*", + default=["function_stream.proto"], + help=( + "Specific .proto files to compile " + "(default: function_stream.proto)" + ), + ) + + args = parser.parse_args() + + logger.info("Starting Code Generation...") + logger.info(f" Proto Root: {args.proto_root}") + logger.info(f" Output Dir: {args.out_dir}") + + generator = CodeGenerator( + proto_root=args.proto_root, + output_dir=args.out_dir, + proto_files=args.files, + ) + generator.run() + + +if __name__ == "__main__": + main() + diff --git a/.github/workflows/lint.yml b/python/functionstream-client/src/fs_client/__init__.py similarity index 51% rename from .github/workflows/lint.yml rename to python/functionstream-client/src/fs_client/__init__.py index 03aea8d1..965645a0 100644 --- a/.github/workflows/lint.yml +++ b/python/functionstream-client/src/fs_client/__init__.py @@ -1,5 +1,3 @@ -# Copyright 2024 Function Stream Org. -# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -12,26 +10,29 @@ # See the License for the specific language governing permissions and # limitations under the License. -name: Lint +""" +Function Stream python Client + +A high-level gRPC client library for interacting with the Function Stream service. 
+""" -on: - pull_request: +__version__ = "0.0.1" -jobs: - lint: - name: Run on Ubuntu - runs-on: ubuntu-latest - steps: - - name: Clone the code - uses: actions/checkout@v4 +from .client import FsClient +from .config import WasmTaskBuilder as ConfigBuilder, WasmTaskConfig as Config +from .exceptions import ( + ClientError, + ServerError, +) +from .models import FunctionInfo, ShowFunctionsResult - - name: Setup Go - uses: actions/setup-go@v5 - with: - go-version-file: ./operator/go.mod +__all__ = [ + "FsClient", + "Config", + "ConfigBuilder", + "ClientError", + "ServerError", + "FunctionInfo", + "ShowFunctionsResult", +] - - name: Run linter - uses: golangci/golangci-lint-action@v6 - working-directory: ./operator/ - with: - version: v1.63.4 diff --git a/python/functionstream-client/src/fs_client/client.py b/python/functionstream-client/src/fs_client/client.py new file mode 100644 index 00000000..a65df21e --- /dev/null +++ b/python/functionstream-client/src/fs_client/client.py @@ -0,0 +1,572 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# you may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Function Stream gRPC Client + +This module provides a production-ready client for the Function Stream service. +It handles connection management, error translation, logging, and type safety. 
+""" + +import ast +import importlib.util +import inspect +import logging +import sys +from pathlib import Path +from typing import Dict, List, Optional, Set, Tuple, Type, Union + +import grpc + +from fs_api import FSProcessorDriver + +from ._proto import function_stream_pb2, function_stream_pb2_grpc +from .config import WasmTaskConfig +from .exceptions import ( + AuthenticationError, + BadRequestError, + ClientError, + ConflictError, + InternalServerError, + NotFoundError, + PermissionDeniedError, + ResourceExhaustedError, + ServerError, + _convert_grpc_error, +) +from .models import FunctionInfo, ShowFunctionsResult + +logger = logging.getLogger(__name__) + + +def _resolve_relative( + module_part: Optional[str], level: int, current_package: str +) -> str: + if level == 0: + return module_part or "" + parts = (current_package or "").split(".") + for _ in range(level - 1): + if parts: + parts.pop() + prefix = ".".join(parts) + if module_part: + return f"{prefix}.{module_part}" if prefix else module_part + return prefix + + +def _get_imported_names(tree: ast.AST) -> List[Tuple[Optional[str], int]]: + out: List[Tuple[Optional[str], int]] = [] + for node in ast.walk(tree): + if isinstance(node, ast.Import): + for alias in node.names: + out.append((alias.name, 0)) + elif isinstance(node, ast.ImportFrom): + out.append((node.module, node.level or 0)) + return out + + +def _is_under_site_or_stdlib(origin: Path) -> bool: + try: + resolved = origin.resolve() + parts = resolved.parts + if "site-packages" in parts or "dist-packages" in parts: + return True + base = Path(sys.base_prefix) + try: + resolved.relative_to(base / "lib") + return True + except ValueError: + return False + except (OSError, RuntimeError): + return True + + +def _get_module_origin( + module_name: str, + result: Dict[str, Path], +) -> Optional[Path]: + """Resolve module origin path from spec or existing result.""" + if module_name == "__main__" and module_name in result: + return result[module_name] + 
try: + spec = importlib.util.find_spec(module_name) + except (ImportError, ValueError, ModuleNotFoundError): + return None + if spec is None or spec.origin is None or spec.origin == "built-in": + return None + return Path(spec.origin) + + +def _process_module_deps( + module_name: str, + origin: Path, + package: str, + dep_graph: Dict[str, Set[str]], + seen: Set[str], + queue: List[str], +) -> None: + """Parse module and add its imports to dep_graph and queue.""" + try: + tree = ast.parse(origin.read_text(encoding="utf-8")) + except (OSError, SyntaxError): + return + for module_part, level in _get_imported_names(tree): + abs_name = ( + module_part + if level == 0 + else _resolve_relative(module_part, level, package) + ) + if abs_name: + dep_graph[module_name].add(abs_name) + if abs_name not in seen: + queue.append(abs_name) + + +def _collect_local_deps( + driver_class: Type, + driver_file: Path, +) -> Tuple[Dict[str, Path], Dict[str, Set[str]]]: + driver_root = driver_file.resolve().parent + driver_module_name = driver_class.__module__ + result: Dict[str, Path] = {} + dep_graph: Dict[str, Set[str]] = {} + queue: List[str] = [driver_module_name] + seen: Set[str] = set() + + if driver_module_name == "__main__": + result[driver_module_name] = driver_file.resolve() + dep_graph[driver_module_name] = set() + + while queue: + module_name = queue.pop(0) + if module_name in seen: + continue + seen.add(module_name) + package = module_name.rpartition(".")[0] + + origin = _get_module_origin(module_name, result) + if origin is None: + continue + if _is_under_site_or_stdlib(origin): + continue + try: + origin.resolve().relative_to(driver_root) + except ValueError: + continue + + result[module_name] = origin + dep_graph[module_name] = set() + _process_module_deps( + module_name, origin, package, dep_graph, seen, queue + ) + + return result, dep_graph + + +def _topo_order(nodes: List[str], graph: Dict[str, Set[str]]) -> List[str]: + node_set = set(nodes) + order: List[str] = [] + g 
= {n: graph.get(n, set()) & node_set for n in nodes} + while g: + for n in list(g): + if all(d in order for d in g[n]): + order.append(n) + del g[n] + break + else: + break + for n in nodes: + if n not in order: + order.append(n) + return order + + +class FsClient: + """ + High-level, thread-safe client for Function Stream gRPC service. + """ + + DEFAULT_TIMEOUT = 30.0 + + DEFAULT_OPTIONS = [ + ('grpc.keepalive_time_ms', 10000), + ('grpc.keepalive_timeout_ms', 5000), + ('grpc.keepalive_permit_without_calls', 1), + ('grpc.http2.max_pings_without_data', 0), + ] + + # Mapping Business Status Codes (HTTP-like) to python Exceptions + _STATUS_CODE_MAP: Dict[int, Type[ServerError]] = { + 400: BadRequestError, + 401: AuthenticationError, + 403: PermissionDeniedError, + 404: NotFoundError, + 409: ConflictError, + 429: ResourceExhaustedError, + 500: InternalServerError, + } + + def __init__( + self, + host: str = "localhost", + port: int = 8080, + secure: bool = False, + channel: Optional[grpc.Channel] = None, + options: Optional[List[tuple]] = None, + default_timeout: float = DEFAULT_TIMEOUT, + ): + self.target = f"{host}:{port}" + self.default_timeout = default_timeout + self._own_channel = False + + if channel: + self._channel = channel + logger.debug( + f"Initialized FsClient with existing channel to {self.target}" + ) + else: + base_options = list(self.DEFAULT_OPTIONS) + if options: + base_options.extend(options) + + logger.info( + f"Connecting to FunctionStream at {self.target} (secure={secure})..." 
+ ) + + if secure: + creds = grpc.ssl_channel_credentials() + self._channel = grpc.secure_channel( + self.target, creds, options=base_options + ) + else: + self._channel = grpc.insecure_channel( + self.target, options=base_options + ) + + self._own_channel = True + + self._stub = function_stream_pb2_grpc.FunctionStreamServiceStub( + self._channel + ) + + def create_function_from_files( + self, + function_path: Union[str, Path], + config_path: Union[str, Path], + timeout: Optional[float] = None, + ) -> bool: + """ + Create a function by reading local files. + """ + f_path = Path(function_path) + c_path = Path(config_path) + + if not f_path.exists(): + raise FileNotFoundError(f"WASM file not found: {f_path}") + if not c_path.exists(): + raise FileNotFoundError(f"Config file not found: {c_path}") + + logger.info( + f"Creating function from files: wasm={f_path.name}, config={c_path.name}" + ) + + try: + func_bytes = f_path.read_bytes() + conf_bytes = c_path.read_bytes() + except OSError as e: + logger.error(f"Failed to read function files: {e}") + raise ClientError(f"File access error: {e}") from e + + return self._create_function_internal(func_bytes, conf_bytes, timeout) + + def create_function_from_bytes( + self, + function_bytes: bytes, + config_content: Union[str, bytes], + timeout: Optional[float] = None, + ) -> bool: + """ + Create a function from in-memory bytes/strings. 
+ """ + if isinstance(config_content, str): + real_conf_bytes = config_content.encode("utf-8") + elif isinstance(config_content, bytes): + real_conf_bytes = config_content + else: + raise TypeError( + f"config_content must be str or bytes, got {type(config_content)}" + ) + + logger.info("Creating function from in-memory bytes") + return self._create_function_internal( + function_bytes, real_conf_bytes, timeout + ) + + def _create_function_internal( + self, func_bytes: bytes, conf_bytes: bytes, timeout: float + ) -> bool: + request = function_stream_pb2.CreateFunctionRequest( + function_bytes=func_bytes, + config_bytes=conf_bytes, + ) + self._invoke(self._stub.CreateFunction, request, timeout) + return True + + def create_python_function( + self, + class_name: str, + modules: List[tuple[str, bytes]], + config_content: str, + ) -> bool: + """ + Create Python function dynamically by loading Python modules. + + Args: + class_name: Name of the Python class to load and instantiate + modules: List of tuples (module_name, module_bytes) containing + Python module code + config_content: Configuration content as string + + Returns: + True if successful + + Raises: + ClientError: If the request fails + """ + if not modules: + raise ValueError("At least one module is required") + + logger.info( + f"Creating Python function: class_name='{class_name}', " + f"modules={len(modules)}" + ) + + # Convert modules to proto format + proto_modules = [ + function_stream_pb2.PythonModule( + module_name=module_name, + module_bytes=module_bytes + ) + for module_name, module_bytes in modules + ] + + request = function_stream_pb2.CreatePythonFunctionRequest( + class_name=class_name, + modules=proto_modules, + config_content=config_content, + ) + + self._invoke(self._stub.CreatePythonFunction, request, None) + return True + + def create_python_function_from_config( + self, + config: WasmTaskConfig, + driver_class: Type, + ) -> bool: + """ + Create Python function from Config and Driver 
implementation. + + Extracts class_name and module bytes from the Driver class + automatically, + and uses the Config's to_yaml() for config_content. + + Args: + config: WasmTaskConfig + driver_class: The FSProcessorDriver subclass implementing the + processor + + Returns: + True if successful + + Raises: + ValueError: If driver_class is not a class + TypeError: If driver_class does not implement FSProcessorDriver + ClientError: If the request fails + """ + if not inspect.isclass(driver_class): + raise ValueError("driver_class must be a class, not an instance") + + if not issubclass(driver_class, FSProcessorDriver): + raise TypeError( + f"driver_class must implement FSProcessorDriver, " + f"got {driver_class.__name__}" + ) + + class_name = driver_class.__name__ + driver_file = Path(inspect.getfile(driver_class)) + paths, dep_graph = _collect_local_deps(driver_class, driver_file) + order = _topo_order(list(paths.keys()), dep_graph) + modules: List[Tuple[str, bytes]] = [] + for name in order: + display_name = driver_file.stem if name == "__main__" else name + try: + modules.append((display_name, paths[name].read_bytes())) + except OSError as e: + raise ClientError( + f"Failed to read module: {paths[name]}" + ) from e + + config_content = config.to_yaml() + + logger.info( + f"Creating Python function from config: class_name='{class_name}', " + f"config_name='{getattr(config, 'task_name', 'unknown')}', " + f"modules={len(modules)}" + ) + + return self.create_python_function( + class_name=class_name, + modules=modules, + config_content=config_content, + ) + + def drop_function( + self, + function_name: str, + timeout: Optional[float] = None, + ) -> bool: + """ + Drop a function by name. + + Args: + function_name: Name of the function to drop. + timeout: Optional timeout in seconds. + + Returns: + True if successful. 
+ """ + request = function_stream_pb2.DropFunctionRequest(function_name=function_name) + self._invoke(self._stub.DropFunction, request, timeout) + return True + + def show_functions( + self, + filter_pattern: Optional[str] = None, + timeout: Optional[float] = None, + ) -> ShowFunctionsResult: + """ + List functions, optionally filtered. + + Args: + filter_pattern: Optional filter string; None to list all. + timeout: Optional timeout in seconds. + + Returns: + ShowFunctionsResult with status_code, message, and list of FunctionInfo. + """ + req = function_stream_pb2.ShowFunctionsRequest() + if filter_pattern is not None: + req.filter = filter_pattern + actual_timeout = timeout if timeout is not None else self.default_timeout + try: + response = self._stub.ShowFunctions(req, timeout=actual_timeout) + except grpc.RpcError as e: + logger.error(f"gRPC call failed: {e.code()} - {e.details()}") + raise _convert_grpc_error(e) from e + + code = response.status_code + if code >= 400: + error_msg = response.message or "Unknown server error" + logger.error(f"Server returned error: code={code}, msg={error_msg}") + exception_cls = self._STATUS_CODE_MAP.get(code, ServerError) + raise exception_cls(message=error_msg, status_code=code) + + functions = [ + FunctionInfo(name=f.name, task_type=f.task_type, status=f.status) + for f in response.functions + ] + return ShowFunctionsResult( + status_code=code, + message=response.message or "", + functions=functions, + ) + + def start_function( + self, + function_name: str, + timeout: Optional[float] = None, + ) -> bool: + """ + Start a function by name. + + Args: + function_name: Name of the function to start. + timeout: Optional timeout in seconds. + + Returns: + True if successful. 
+ """ + request = function_stream_pb2.StartFunctionRequest(function_name=function_name) + self._invoke(self._stub.StartFunction, request, timeout) + return True + + def stop_function( + self, + function_name: str, + timeout: Optional[float] = None, + ) -> bool: + """ + Stop a function by name. + + Args: + function_name: Name of the function to stop. + timeout: Optional timeout in seconds. + + Returns: + True if successful. + """ + request = function_stream_pb2.StopFunctionRequest(function_name=function_name) + self._invoke(self._stub.StopFunction, request, timeout) + return True + + def _invoke(self, rpc_method, request, timeout: Optional[float]): + """ + Generic gRPC invocation wrapper with Error Mapping. + """ + actual_timeout = timeout if timeout is not None else self.default_timeout + + try: + response = rpc_method(request, timeout=actual_timeout) + + if response.status_code >= 400: + error_msg = response.message or "Unknown server error" + logger.error( + f"Server returned error: code={response.status_code}, " + f"msg={error_msg}" + ) + + # Automatically map status code to specific exception + exception_cls = self._STATUS_CODE_MAP.get( + response.status_code, ServerError + ) + raise exception_cls( + message=error_msg, status_code=response.status_code + ) + + return response + + except grpc.RpcError as e: + logger.error(f"gRPC call failed: {e.code()} - {e.details()}") + raise _convert_grpc_error(e) from e + + def close(self): + if self._own_channel and self._channel: + logger.debug("Closing gRPC channel") + self._channel.close() + self._channel = None + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.close() diff --git a/python/functionstream-client/src/fs_client/config.py b/python/functionstream-client/src/fs_client/config.py new file mode 100644 index 00000000..9a8fc54e --- /dev/null +++ b/python/functionstream-client/src/fs_client/config.py @@ -0,0 +1,158 @@ +# Licensed under the Apache License, Version 2.0 (the 
"License"); +# you may not use this file except in compliance with the License. +# you may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Any, Dict, List, Optional + +import yaml + +# ========================================== +# 1. Models mapping to Rust Input/Output +# ========================================== + +class KafkaInput: + def __init__(self, bootstrap_servers: str, topic: str, group_id: str, partition: Optional[int] = None): + self.data = { + "input-type": "kafka", + "bootstrap_servers": bootstrap_servers, + "topic": topic, + "group_id": group_id, + } + if partition is not None: + self.data["partition"] = partition + +class KafkaOutput: + def __init__(self, bootstrap_servers: str, topic: str, partition: int): + self.data = { + "output-type": "kafka", + "bootstrap_servers": bootstrap_servers, + "topic": topic, + "partition": partition, + } + +# ========================================== +# 2. 
WasmTaskConfig Object +# ========================================== + +class WasmTaskConfig: + def __init__( + self, + task_name: str, + task_type: str, + input_groups: List[Dict], + use_builtin: bool, + enable_checkpoint: bool, + checkpoint_interval: int, + init_config: Dict[str, str], + outputs: List[Dict] + ): + self.task_name = task_name + self.task_type = task_type + self.input_groups = input_groups + self.use_builtin_event_serialization = use_builtin + self.enable_checkpoint = enable_checkpoint + self.checkpoint_interval_seconds = checkpoint_interval + self.init_config = init_config + self.outputs = outputs + + def to_dict(self) -> Dict[str, Any]: + return { + "name": self.task_name, + "type": self.task_type, + "use_builtin_event_serialization": self.use_builtin_event_serialization, + "enable_checkpoint": self.enable_checkpoint, + "checkpoint_interval_seconds": self.checkpoint_interval_seconds, + "init_config": self.init_config, + "input-groups": self.input_groups, + "outputs": self.outputs, + } + + def to_yaml(self) -> str: + return yaml.dump(self.to_dict(), sort_keys=False, allow_unicode=True, indent=2) + + @classmethod + def from_yaml(cls, yaml_str: str) -> "WasmTaskConfig": + data = yaml.safe_load(yaml_str) + if not isinstance(data, dict): + raise ValueError("Config YAML must be a mapping") + name = data.get("name") or "default-processor" + if isinstance(name, str) and not name.strip(): + name = "default-processor" + return cls( + task_name=name, + task_type=data.get("type") or "python", + input_groups=data.get("input-groups") or [], + use_builtin=data.get("use_builtin_event_serialization", False), + enable_checkpoint=data.get("enable_checkpoint", False), + checkpoint_interval=max(1, data.get("checkpoint_interval_seconds", 1)), + init_config=data.get("init_config") or {}, + outputs=data.get("outputs") or [], + ) + +# ========================================== +# 3. 
Builder Implementation +# ========================================== + +class WasmTaskBuilder: + def __init__(self): + self._name: Optional[str] = None + self._type: str = "python" + self._use_builtin: bool = False + self._enable_checkpoint: bool = False + self._checkpoint_interval: int = 1 + self._init_config: Dict[str, str] = {} + self._input_groups_data: List[Dict] = [] + self._outputs_data: List[Dict] = [] + + def set_name(self, name: str): + self._name = name + return self + + def set_type(self, task_type: str): + self._type = task_type + return self + + def set_builtin_serialization(self, enabled: bool): + self._use_builtin = enabled + return self + + def configure_checkpoint(self, enabled: bool, interval: int = 1): + self._enable_checkpoint = enabled + self._checkpoint_interval = max(1, interval) + return self + + def add_init_config(self, key: str, value: str): + self._init_config[key] = value + return self + + def add_input_group(self, inputs: List[KafkaInput]): + self._input_groups_data.append({ + "inputs": [item.data for item in inputs] + }) + return self + + def add_output(self, output: KafkaOutput): + self._outputs_data.append(output.data) + return self + + def build(self) -> WasmTaskConfig: + final_name = self._name if (self._name and self._name.strip()) else "default-processor" + + return WasmTaskConfig( + task_name=final_name, + task_type=self._type, + input_groups=self._input_groups_data, + use_builtin=self._use_builtin, + enable_checkpoint=self._enable_checkpoint, + checkpoint_interval=self._checkpoint_interval, + init_config=self._init_config, + outputs=self._outputs_data + ) diff --git a/python/functionstream-client/src/fs_client/exceptions.py b/python/functionstream-client/src/fs_client/exceptions.py new file mode 100644 index 00000000..69352efd --- /dev/null +++ b/python/functionstream-client/src/fs_client/exceptions.py @@ -0,0 +1,144 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in 
compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Custom exceptions for Function Stream Client. + +This module maps gRPC status codes to semantic python exceptions, +allowing users to handle specific error cases (e.g., timeouts, auth failures) +granularly. +""" + +from typing import Optional + +import grpc + + +class FsError(Exception): + """Base exception for all Function Stream errors.""" + def __init__( + self, message: str, original_exception: Optional[Exception] = None + ): + super().__init__(message) + self.original_exception = original_exception + + +class ClientError(FsError): + """ + Raised when the error originates from the client side. + Examples: File not found, invalid configuration, local validation failure. + """ + + +class ServerError(FsError): + """ + Base class for errors returned by the server. + This includes both business logic errors and gRPC protocol errors. 
+ """ + def __init__( + self, + message: str, + status_code: Optional[int] = None, + grpc_code: Optional[grpc.StatusCode] = None, + ): + super().__init__(message) + # Business logic code (e.g., HTTP-like 400/500) + self.status_code = status_code + # gRPC protocol code (e.g., UNAVAILABLE) + self.grpc_code = grpc_code + + +# --- Specific Network/Protocol Errors --- + +class NetworkError(ServerError): + """Raised when communication with the server fails (e.g., connection refused).""" + + +class FunctionStreamTimeoutError(ServerError): + """Raised when the operation timed out.""" + + +# --- Specific Business/Logic Errors (Mapped from gRPC Codes) --- + +class BadRequestError(ServerError): + """Raised when the arguments are invalid (INVALID_ARGUMENT).""" + + +class AuthenticationError(ServerError): + """Raised when the client is not authenticated (UNAUTHENTICATED).""" + + +class PermissionDeniedError(ServerError): + """Raised when the client does not have permission (PERMISSION_DENIED).""" + + +class NotFoundError(ServerError): + """Raised when a requested resource is not found (NOT_FOUND).""" + pass + + +class ConflictError(ServerError): + """Raised when a resource already exists or state conflict occurs (ALREADY_EXISTS, ABORTED).""" + pass + + +class ResourceExhaustedError(ServerError): + """Raised when the server is out of resources or quota (RESOURCE_EXHAUSTED).""" + pass + + +class InternalServerError(ServerError): + """Raised when the server encountered an internal error (INTERNAL, DATA_LOSS, UNKNOWN).""" + pass + + +# --- Mapping Logic --- + +_GRPC_CODE_TO_EXCEPTION = { + grpc.StatusCode.INVALID_ARGUMENT: BadRequestError, + grpc.StatusCode.OUT_OF_RANGE: BadRequestError, + + grpc.StatusCode.UNAUTHENTICATED: AuthenticationError, + grpc.StatusCode.PERMISSION_DENIED: PermissionDeniedError, + + grpc.StatusCode.NOT_FOUND: NotFoundError, + + grpc.StatusCode.ALREADY_EXISTS: ConflictError, + grpc.StatusCode.ABORTED: ConflictError, + + grpc.StatusCode.RESOURCE_EXHAUSTED: 
ResourceExhaustedError, + + grpc.StatusCode.DEADLINE_EXCEEDED: FunctionStreamTimeoutError, + grpc.StatusCode.UNAVAILABLE: NetworkError, + + grpc.StatusCode.INTERNAL: InternalServerError, + grpc.StatusCode.DATA_LOSS: InternalServerError, + grpc.StatusCode.UNKNOWN: InternalServerError, + grpc.StatusCode.UNIMPLEMENTED: InternalServerError, +} + + +def _convert_grpc_error(e: grpc.RpcError) -> FsError: + """ + Convert a gRPC RpcError into a semantic FsError subclass. + """ + code = e.code() + details = e.details() or "Unknown gRPC error" + + # 1. Look up the specific exception class + exception_cls = _GRPC_CODE_TO_EXCEPTION.get(code, ServerError) + + # 2. Create the exception message + message = f"{details} (gRPC code: {code.name if code else 'NONE'})" + + # 3. Return the instance (caller should raise it) + return exception_cls(message, grpc_code=code) diff --git a/python/functionstream-client/src/fs_client/models.py b/python/functionstream-client/src/fs_client/models.py new file mode 100644 index 00000000..fa8c452c --- /dev/null +++ b/python/functionstream-client/src/fs_client/models.py @@ -0,0 +1,38 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Data models for Function Stream Client. + +Provides Python-friendly dataclasses for API results, independent of protobuf. 
+""" + +from dataclasses import dataclass +from typing import List + + +@dataclass(frozen=True) +class FunctionInfo: + """Function metadata returned by ShowFunctions.""" + + name: str + task_type: str + status: str + + +@dataclass +class ShowFunctionsResult: + """Result of ShowFunctions RPC: status, message, and list of functions.""" + + status_code: int + message: str + functions: List[FunctionInfo] diff --git a/python/functionstream-runtime/Makefile b/python/functionstream-runtime/Makefile new file mode 100644 index 00000000..748f3638 --- /dev/null +++ b/python/functionstream-runtime/Makefile @@ -0,0 +1,144 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# --- 1. Configuration & Variables --- + +# Shell setup +SHELL := /bin/bash +.SHELLFLAGS := -eu -o pipefail -c + +# Directory paths (Absolute paths for safety) +mkfile_path := $(abspath $(lastword $(MAKEFILE_LIST))) +current_dir := $(dir $(mkfile_path)) + +# [FIX 1] Define standard roots for Monorepo +# SCRIPT_DIR: Current directory (.../python/functionstream-runtime) +SCRIPT_DIR := $(current_dir) + +# PYTHON_ROOT: Parent directory (.../python) - Used to find siblings like 'api' +PYTHON_ROOT := $(abspath $(current_dir)/..) + +# MONOREPO_ROOT: Grandparent directory (.../function-stream) - Used to find .venv +MONOREPO_ROOT := $(abspath $(current_dir)/../..) 
+ +# Output paths +OUTPUT_DIR := $(SCRIPT_DIR)/target +WASM_OUTPUT := $(OUTPUT_DIR)/functionstream-python-runtime.wasm + +# Cache directory for distribution +CACHE_DIR := $(MONOREPO_ROOT)/data/cache/python-runner + +# [FIX 2] Virtual Environment (Located at Monorepo Root) +VENV_DIR := $(MONOREPO_ROOT)/.venv +VENV_BIN := $(VENV_DIR)/bin +PYTHON := $(VENV_BIN)/python +PIP := $(VENV_BIN)/pip +COMPONENTIZE := $(VENV_BIN)/componentize-py + +# --- 2. Colors & logging --- +ifneq ($(shell test -t 0; echo $$?),0) + color_reset := + color_green := + color_blue := + color_red := +else + color_reset := $(shell tput sgr0) + color_green := $(shell tput setaf 2) + color_blue := $(shell tput setaf 4) + color_red := $(shell tput setaf 1) +endif + +define log_info + @echo "$(color_blue)[INFO]$(color_reset) $1" +endef + +define log_success + @echo "$(color_green)[SUCCESS]$(color_reset) $1" +endef + +define log_error + @echo "$(color_red)[ERROR]$(color_reset) $1" +endef + +# --- 3. Targets --- + +.PHONY: all build clean install-deps venv check-env help + +# Default target +all: build + +# Ensure venv exists (Delegate to root venv creation if needed, or just warn) +$(PYTHON): + $(call log_info, Virtual environment not found at $(VENV_DIR)) + $(call log_info, Please run 'python3 -m venv .venv' in the root 'function-stream' directory first.) + @exit 1 + +venv: $(PYTHON) + +# Install dependencies +install-deps: venv + $(call log_info, Installing build dependencies...) + @$(PIP) install componentize-py + @# [FIX 3] Use PYTHON_ROOT to find the sibling 'functionstream-api' + @$(PIP) install -e $(PYTHON_ROOT)/functionstream-api + @$(call log_success, Dependencies installed.) + +# Build the wasm component +build: venv + @# Check if componentize-py is installed + @if [ ! -f "$(COMPONENTIZE)" ]; then \ + echo "$(color_red)[ERROR]$(color_reset) componentize-py not found. Run 'make install-deps' first."; \ + exit 1; \ + fi + $(call log_info, Building WASM component...) 
+ @mkdir -p $(OUTPUT_DIR) + @# [FIX 4] PYTHONPATH points to the sibling API correctly + @PYTHONPATH=$(PYTHON_ROOT)/functionstream-api $(PYTHON) $(SCRIPT_DIR)/build.py + @if [ -f "$(WASM_OUTPUT)" ]; then \ + echo "$(color_green)[SUCCESS]$(color_reset) Build complete: $(WASM_OUTPUT)"; \ + ls -lh $(WASM_OUTPUT); \ + mkdir -p $(CACHE_DIR); \ + cp $(WASM_OUTPUT) $(CACHE_DIR)/; \ + echo "$(color_green)[SUCCESS]$(color_reset) Copied to: $(CACHE_DIR)/functionstream-python-runtime.wasm"; \ + else \ + echo "$(color_red)[ERROR]$(color_reset) Build failed: Output file missing"; \ + exit 1; \ + fi + +# Clean artifacts +clean: + $(call log_info, Cleaning artifacts...) + @rm -rf $(OUTPUT_DIR) + @rm -rf $(SCRIPT_DIR)/bindings + @rm -rf $(SCRIPT_DIR)/dependencies + @find $(SCRIPT_DIR) -type d -name "__pycache__" -exec rm -rf {} + + @find $(SCRIPT_DIR) -type d -name "*.egg-info" -exec rm -rf {} + + @find $(SCRIPT_DIR) -type d -name ".pytest_cache" -exec rm -rf {} + + $(call log_success, Clean completed.) + +# Self-documenting help command +help: + @echo "$(color_blue)Function Stream Runtime Build System$(color_reset)" + @echo "" + @echo "Usage:" + @echo " make $(color_green)target$(color_reset)" + @echo "" + @echo "Targets:" + @awk '/^[a-zA-Z\-\_0-9]+:/ { \ + helpMessage = match(lastLine, /^# (.*)/); \ + if (helpMessage) { \ + helpCommand = substr($$1, 0, index($$1, ":")-1); \ + helpMessage = substr(lastLine, RSTART + 2, RLENGTH); \ + printf " $(color_green)%-20s$(color_reset) %s\n", helpCommand, helpMessage; \ + } \ + } \ + { lastLine = $$0 }' $(MAKEFILE_LIST) \ No newline at end of file diff --git a/python/functionstream-runtime/build.py b/python/functionstream-runtime/build.py new file mode 100755 index 00000000..498ae1c9 --- /dev/null +++ b/python/functionstream-runtime/build.py @@ -0,0 +1,289 @@ +#!/usr/bin/env python3 +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
class WasmBuilder:
    """Encapsulates the wasm build process lifecycle.

    Pipeline (see run()): clean -> prepare_dependencies ->
    generate_bindings -> build_wasm.
    """

    def __init__(self):
        # Interpreter used for pip installs (the one running this script).
        self.python_exec = sys.executable
        # Resolved componentize-py executable; raises BuildError if absent.
        self.componentize_cmd = self._find_componentize_py()

    def _find_componentize_py(self) -> str:
        """Locate the 'componentize-py' executable.

        Prioritizes the active virtual environment, then system PATH.

        Returns:
            Path to the executable as a string.

        Raises:
            BuildError: if componentize-py cannot be found.
        """
        # 1. Check current venv bin (reliable method).
        venv_bin = Path(sys.executable).parent
        candidate = venv_bin / "componentize-py"
        if candidate.exists() and os.access(candidate, os.X_OK):
            return str(candidate)

        # 2. Check standard PATH (fallback).
        path_cmd = shutil.which("componentize-py")
        if path_cmd:
            return path_cmd

        logger.error("❌ 'componentize-py' not found.")
        logger.info(
            "Please install it in your environment: pip install componentize-py"
        )
        raise BuildError("Dependency missing: componentize-py")

    def _run_checked(self, cmd, cwd=None, env=None):
        """Run a subprocess with captured output; raise CalledProcessError on failure.

        Shared helper for all build steps. Note: subprocess.run is safe here
        as cmd is constructed from controlled build-time inputs, not
        user-provided data.
        """
        logger.debug(f"Exec: {' '.join(cmd)}")
        return subprocess.run(  # noqa: S603
            cmd,
            cwd=cwd,
            check=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
            env=env,
        )

    def clean(self) -> None:
        """Clean build artifacts and temporary directories.

        CRITICAL: Removes 'bindings' to prevent 'File exists' errors.
        """
        logger.info("Cleaning build artifacts...")

        for d in (TARGET_DIR, DEPENDENCIES_DIR, BINDINGS_DIR):
            if d.exists():
                try:
                    shutil.rmtree(d)
                    logger.debug(f"Removed: {d}")
                except OSError as e:
                    # Best-effort: a leftover directory is not fatal here.
                    logger.warning(f"Failed to remove {d}: {e}")

        logger.info("✓ Clean completed.")

    def prepare_dependencies(self) -> None:
        """Install local dependencies (functionstream-api) into a temp dir.

        Raises:
            BuildError: if the API source is missing or pip fails.
        """
        logger.info("Preparing dependencies...")

        if not FS_API_DIR.exists():
            raise BuildError(
                f"functionstream-api source not found at: {FS_API_DIR}"
            )

        DEPENDENCIES_DIR.mkdir(parents=True, exist_ok=True)

        # Install fs-api to local dependencies folder using pip.
        cmd = [
            self.python_exec, "-m", "pip", "install",
            "--target", str(DEPENDENCIES_DIR),
            str(FS_API_DIR)
        ]

        try:
            self._run_checked(cmd)
            logger.info("✓ Dependencies prepared (fs-api installed).")
        except subprocess.CalledProcessError as e:
            logger.error("Failed to install dependencies.")
            logger.error(f"STDERR: {e.stderr}")
            # Chain the original error so the root cause stays visible.
            raise BuildError("Dependency installation failed.") from e

    def generate_bindings(self) -> None:
        """Generate WIT bindings for reference/development.

        Failures are tolerated: bindings are optional for the final artifact.

        Raises:
            BuildError: if the WIT file itself is missing.
        """
        logger.info("Generating WIT bindings...")

        if not WIT_FILE.exists():
            raise BuildError(f"WIT file not found: {WIT_FILE}")

        BINDINGS_DIR.mkdir(parents=True, exist_ok=True)

        cmd = [
            self.componentize_cmd,
            "-d", str(WIT_FILE),
            "-w", WORLD_NAME,
            "bindings",
            str(BINDINGS_DIR)
        ]

        try:
            self._run_checked(cmd, cwd=SCRIPT_DIR)
            logger.info(f"✓ Bindings generated at: {BINDINGS_DIR}")
        except subprocess.CalledProcessError as e:
            logger.warning(f"Failed to generate bindings: {e.stderr}")
            logger.warning(
                "Continuing build process (bindings are optional for the artifact)..."
            )

    def build_wasm(self) -> None:
        """Compile the python code into a wasm component.

        Raises:
            BuildError: if the entry point is missing, compilation fails,
                or no output file is produced.
        """
        logger.info("Compiling wasm component...")

        TARGET_DIR.mkdir(parents=True, exist_ok=True)

        # Validate entry point existence.
        # Assumes structure: src/fs_runtime/runner.py
        module_path = MAIN_MODULE.replace(".", "/") + ".py"
        entry_file = SRC_DIR / module_path
        if not entry_file.exists():
            raise BuildError(f"Entry point not found: {entry_file}")

        cmd = [
            self.componentize_cmd,
            "-d", str(WIT_FILE),
            "-w", WORLD_NAME,
            "componentize",
            "--stub-wasi",
            "-p", str(DEPENDENCIES_DIR),  # Path 1: Pre-installed deps (fs-api)
            "-p", str(SRC_DIR),           # Path 2: Runtime source code
            MAIN_MODULE,                  # Entry module name
            "-o", str(WASM_OUTPUT)
        ]

        logger.info("Executing componentize command...")

        try:
            # Inherit environment to keep PATH settings.
            process = self._run_checked(cmd, cwd=SCRIPT_DIR, env=os.environ.copy())

            # componentize-py outputs helpful info to stdout.
            if process.stdout:
                logger.debug(process.stdout)

            if not WASM_OUTPUT.exists():
                raise BuildError(
                    "Build command succeeded but output file is missing."
                )

            size_kb = WASM_OUTPUT.stat().st_size / 1024
            logger.info("✓ wasm Build Successful!")
            logger.info(f"  Output: {WASM_OUTPUT}")
            logger.info(f"  Size: {size_kb:.2f} KB")
        except subprocess.CalledProcessError as e:
            logger.error("wasm compilation failed.")
            logger.error(f"STDOUT: {e.stdout}")
            logger.error(f"STDERR: {e.stderr}")
            raise BuildError("wasm compilation failed.") from e

    def run(self) -> None:
        """Main execution flow; exits the process on failure."""
        print("=" * 60)
        print(" Function Stream Runtime - wasm Builder")
        print("=" * 60)

        try:
            # 1. Clean first (Fixes 'File exists' errors)
            self.clean()

            # 2. Prepare environment
            self.prepare_dependencies()

            # 3. Generate bindings (Optional but good for checking WIT)
            self.generate_bindings()

            # 4. Build actual artifact
            self.build_wasm()

            print("\n" + "=" * 60)
            logger.info("Process completed successfully.")
            print("=" * 60)

        except BuildError as e:
            logger.error(f"Build failed: {e}")
            sys.exit(1)
        except KeyboardInterrupt:
            logger.error("Build interrupted.")
            sys.exit(130)
        except Exception:
            logger.exception("Unexpected error occurred:")
            sys.exit(1)
Org. -# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -7,24 +5,23 @@ # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, +# distributed under the License is distributed on "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +""" +functionstream-runtime (wasm kernel & WIT implementation) + +Contains real WIT bindings, only exists on the server side +""" + +__version__ = "1.0.0" + +from .runner import WitWorld +from .store.fs_context import WitContext -name: License Check -on: - pull_request: - branches: - - main - push: - branches: - - main -jobs: - build: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - name: Run license check - run: make license \ No newline at end of file +__all__ = [ + "WitWorld", + "WitContext", +] diff --git a/python/functionstream-runtime/src/fs_runtime/runner.py b/python/functionstream-runtime/src/fs_runtime/runner.py new file mode 100644 index 00000000..27618799 --- /dev/null +++ b/python/functionstream-runtime/src/fs_runtime/runner.py @@ -0,0 +1,175 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
def fs_exec(class_name: str, modules: List[Tuple[str, bytes]]) -> None:
    """Load user-supplied source modules and instantiate the processor driver.

    Args:
        class_name: Name of the processor class to instantiate.
        modules: Ordered (module_name, UTF-8 source bytes) pairs; the last
            module is the most likely home of class_name.

    Raises:
        RuntimeError: if loading, resolution, or instantiation fails
            (original cause chained).
    """
    global _DRIVER

    try:
        # Load every module in order; each one is registered in sys.modules
        # so later modules can import earlier ones.
        loaded_modules = {}
        for module_name, module_bytes in modules:
            loaded_modules[module_name] = _load_source_module(
                module_name, module_bytes
            )

        ProcessorClass = _resolve_processor_class(
            class_name, modules, loaded_modules
        )

        if not issubclass(ProcessorClass, FSProcessorDriver):
            raise TypeError(
                f"Class '{class_name}' must be a subclass of FSProcessorDriver"
            )

        _DRIVER = ProcessorClass()

    except Exception as e:
        raise RuntimeError(
            f"Failed to load class '{class_name}' from modules: {e}"
        ) from e


def _load_source_module(module_name: str, module_bytes: bytes):
    """Compile and execute one module's source; register it in sys.modules.

    Raises:
        ValueError: if the source bytes are not valid UTF-8 (cause chained).
        RuntimeError: if a module spec cannot be created.
    """
    # Decode module bytes to string (assuming UTF-8 encoding).
    try:
        module_source = module_bytes.decode("utf-8")
    except UnicodeDecodeError as e:
        raise ValueError(
            f"Failed to decode module_bytes as UTF-8 for module '{module_name}'"
        ) from e

    # Create a module spec from the module name.
    spec = importlib.util.spec_from_loader(module_name, loader=None)
    if spec is None:
        raise RuntimeError(f"Failed to create module spec for {module_name}")

    # Create the module object (empty; not yet executed).
    module = importlib.util.module_from_spec(spec)

    # Execute the module source code.
    # exec is required: importlib.util.module_from_spec does not execute
    # code; Python's import system uses exec internally. This runtime
    # is designed to execute trusted user-provided processor code in
    # an isolated WASM sandbox. Only deploy code from trusted sources.
    code = compile(module_source, f"<{module_name}>", "exec")
    exec(code, module.__dict__)  # noqa: S102 # nosec B102

    sys.modules[module_name] = module
    return module


def _resolve_processor_class(class_name, modules, loaded_modules):
    """Find class_name among the loaded modules.

    Search order matches the original behavior: the last-listed module
    first (most likely location), then all modules in insertion order.

    Raises:
        RuntimeError: if the class is not found in any module.
    """
    if modules:
        last_module = loaded_modules.get(modules[-1][0])
        if last_module is not None and hasattr(last_module, class_name):
            return getattr(last_module, class_name)

    for module in loaded_modules.values():
        if hasattr(module, class_name):
            return getattr(module, class_name)

    module_names = [name for name, _ in modules]
    raise RuntimeError(
        f"Class '{class_name}' not found in any of the loaded modules: {module_names}"
    )
try: + _DRIVER.process_watermark(_CONTEXT, source_id, watermark) + except Exception as e: + logger.debug("Watermark process error (graceful degradation): %s", e) + + def fs_take_checkpoint(self, checkpoint_id: int) -> None: + if not _DRIVER or not _CONTEXT: + return + + try: + _DRIVER.take_checkpoint(_CONTEXT, checkpoint_id) + except Exception as e: + logger.debug("Checkpoint error (graceful degradation): %s", e) + + def fs_check_heartbeat(self) -> bool: + if not _DRIVER or not _CONTEXT: + return False + + try: + return _DRIVER.check_heartbeat(_CONTEXT) + except Exception as e: + logger.debug("Heartbeat check failed (graceful degradation): %s", e) + return False + + def fs_close(self) -> None: + global _DRIVER, _CONTEXT + + if _DRIVER and _CONTEXT: + try: + _DRIVER.close(_CONTEXT) + except Exception as e: + logger.debug("Driver close error (cleanup continues): %s", e) + + _DRIVER = None + _CONTEXT = None + + def fs_exec(self, class_name: str, modules: List[Tuple[str, bytes]]) -> None: + fs_exec(class_name, modules) + + def fs_custom(self, payload: bytes) -> bytes: + if not _DRIVER or not _CONTEXT: + raise RuntimeError("Driver or Context not initialized") + + return _DRIVER.custom(payload) + + +__all__ = ['WitWorld'] diff --git a/python/functionstream-runtime/src/fs_runtime/store/__init__.py b/python/functionstream-runtime/src/fs_runtime/store/__init__.py new file mode 100644 index 00000000..b99423d9 --- /dev/null +++ b/python/functionstream-runtime/src/fs_runtime/store/__init__.py @@ -0,0 +1,36 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from .fs_error import wit_to_api_error, api_to_wit_error +from .fs_complex_key import api_to_wit, wit_to_api +from .fs_iterator import FSIterator +from .fs_store import FSStore +from .fs_collector import emit, emit_watermark +from .fs_context import WitContext, convert_config_to_dict + +try: + from wit_world.imports import kv, collector + _WIT_AVAILABLE = True +except ImportError: + kv = None + collector = None + _WIT_AVAILABLE = False + +__all__ = [ + 'wit_to_api_error', 'api_to_wit_error', + 'api_to_wit', 'wit_to_api', + 'FSIterator', 'FSStore', + 'emit', 'emit_watermark', + 'WitContext', 'convert_config_to_dict', + 'kv', 'collector', +] + diff --git a/python/functionstream-runtime/src/fs_runtime/store/fs_collector.py b/python/functionstream-runtime/src/fs_runtime/store/fs_collector.py new file mode 100644 index 00000000..f9bcff5b --- /dev/null +++ b/python/functionstream-runtime/src/fs_runtime/store/fs_collector.py @@ -0,0 +1,57 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
def emit(data: bytes, channel: int = 0) -> None:
    """Send an output record to the host collector on the given channel.

    Raises:
        RuntimeError: if the WIT collector binding is not available.
    """
    if wit_emit is None:
        raise RuntimeError("WIT Collector binding is not available")

    # Errors from the host call itself are logged and swallowed
    # (graceful degradation), mirroring the runtime's other hooks.
    try:
        wit_emit(channel, data)
    except Exception as exc:
        logger.debug("Emit error (graceful degradation): %s", exc)


def emit_watermark(watermark: int, channel: int = 0) -> None:
    """Send a watermark to the host collector on the given channel.

    Raises:
        RuntimeError: if the WIT collector binding is not available.
    """
    if wit_emit_watermark is None:
        raise RuntimeError("WIT Collector binding is not available")

    try:
        wit_emit_watermark(channel, watermark)
    except Exception as exc:
        logger.debug("Watermark emit error (graceful degradation): %s", exc)


__all__ = ['emit', 'emit_watermark']
def api_to_wit(api_key: 'ApiComplexKey') -> 'WitComplexKey':
    """Convert an fs_api ComplexKey into the WIT binding's ComplexKey.

    Falls back to a plain dict with the same field names when the WIT
    binding is unavailable (e.g., running outside the wasm host).
    """
    if WitComplexKey is not None:
        return WitComplexKey(
            key_group=api_key.key_group,
            key=api_key.key,
            namespace=api_key.namespace,
            user_key=api_key.user_key,
        )
    return {
        'key_group': api_key.key_group,
        'key': api_key.key,
        'namespace': api_key.namespace,
        'user_key': api_key.user_key,
    }


def wit_to_api(wit_key: 'WitComplexKey') -> 'ApiComplexKey':
    """Convert a WIT ComplexKey (object or dict fallback) into an fs_api ComplexKey.

    Missing dict fields default to empty bytes.
    """
    # Uses the module-level ApiComplexKey import; the previous local
    # re-import of the same class was redundant.
    if isinstance(wit_key, dict):
        return ApiComplexKey(
            key_group=wit_key.get('key_group', b''),
            key=wit_key.get('key', b''),
            namespace=wit_key.get('namespace', b''),
            user_key=wit_key.get('user_key', b''),
        )
    return ApiComplexKey(
        key_group=wit_key.key_group,
        key=wit_key.key,
        namespace=wit_key.namespace,
        user_key=wit_key.user_key,
    )


__all__ = ['api_to_wit', 'wit_to_api']
def convert_config_to_dict(config: List[Tuple[str, str]]) -> Dict[str, str]:
    """Normalize a WIT config payload into a str-to-str dictionary.

    Accepts a heterogeneous sequence whose entries may be (key, value)
    pairs (lists or tuples; extra elements ignored) or dicts. Entries of
    any other shape are skipped. Keys and values are coerced via str().
    A falsy config yields an empty dict.
    """
    normalized: Dict[str, str] = {}

    if not config:
        return normalized

    for entry in config:
        if isinstance(entry, (list, tuple)):
            # Pairs (or longer): take the first two elements only.
            if len(entry) >= 2:
                normalized[str(entry[0])] = str(entry[1])
        elif isinstance(entry, dict):
            for key, value in entry.items():
                normalized[str(key)] = str(value)
        # Anything else is silently ignored.

    return normalized
def wit_to_api_error(wit_error) -> KvError:
    """Convert a WIT-side error into the corresponding fs_api KvError.

    Handles three representations: the generated WIT error classes (when
    the bindings are importable), the bare string "not-found", and the
    ("io-error"/"other", message) tuple fallback. Anything unrecognized
    becomes KvOtherError("Unknown error").
    """
    # Preferred path: the generated WIT error classes.
    if Error_NotFound is not None and isinstance(wit_error, Error_NotFound):
        return KvNotFoundError()
    elif Error_IoError is not None and isinstance(wit_error, Error_IoError):
        return KvIOError(wit_error.value)
    elif Error_Other is not None and isinstance(wit_error, Error_Other):
        return KvOtherError(wit_error.value)

    # Fallback wire encodings. (The original also checked
    # isinstance(wit_error, str) before comparing to "not-found", which
    # is redundant: equality with a str literal already implies it.)
    if wit_error == "not-found":
        return KvNotFoundError()
    elif isinstance(wit_error, tuple) and len(wit_error) > 0:
        if wit_error[0] == "io-error":
            message = wit_error[1] if len(wit_error) > 1 else "IO error"
            return KvIOError(message)
        elif wit_error[0] == "other":
            message = wit_error[1] if len(wit_error) > 1 else "Other error"
            return KvOtherError(message)

    return KvOtherError("Unknown error")


def api_to_wit_error(py_error: KvError):
    """Convert an fs_api KvError into the WIT error representation.

    Returns instances of the generated WIT error classes when the bindings
    are available; otherwise the string/tuple wire encoding ("not-found",
    ("io-error", msg), ("other", msg)).
    """
    bindings_available = (
        Error_NotFound is not None
        and Error_IoError is not None
        and Error_Other is not None
    )

    if bindings_available:
        if isinstance(py_error, KvNotFoundError):
            return Error_NotFound()
        elif isinstance(py_error, KvIOError):
            message = py_error.message if hasattr(py_error, 'message') else str(py_error)
            return Error_IoError(value=message)
        elif isinstance(py_error, KvOtherError):
            message = py_error.message if hasattr(py_error, 'message') else str(py_error)
            return Error_Other(value=message)
        else:
            return Error_Other(value=str(py_error))

    # Bindings unavailable: emit the plain wire encoding.
    if isinstance(py_error, KvNotFoundError):
        return "not-found"
    elif isinstance(py_error, KvIOError):
        message = py_error.message if hasattr(py_error, 'message') else "IO error"
        return ("io-error", message)
    elif isinstance(py_error, KvOtherError):
        message = py_error.message if hasattr(py_error, 'message') else "Other error"
        return ("other", message)
    else:
        return ("other", str(py_error))


__all__ = ['wit_to_api_error', 'api_to_wit_error']
+ +from typing import TYPE_CHECKING, Optional, Tuple + +from fs_api.store.iterator import KvIterator + +from .fs_error import wit_to_api_error + +if TYPE_CHECKING: + from wit_world.imports.kv import Iterator as WitIterator +else: + try: + from wit_world.imports.kv import Iterator as WitIterator + except (ImportError, AttributeError): + WitIterator = None + + +class FSIterator(KvIterator): + + def __init__(self, wit_iterator: 'WitIterator'): + if WitIterator is None: + raise RuntimeError("WIT Iterator binding is not available") + if not isinstance(wit_iterator, WitIterator): + raise TypeError(f"Expected WitIterator, got {type(wit_iterator)}") + + self._iterator: WitIterator = wit_iterator + + def has_next(self) -> bool: + try: + return self._iterator.has_next() + except Exception as e: + api_error = wit_to_api_error(e) + raise api_error + + def next(self) -> Optional[Tuple[bytes, bytes]]: + try: + return self._iterator.next() + except Exception as e: + api_error = wit_to_api_error(e) + raise api_error + + +__all__ = ['FSIterator'] + diff --git a/python/functionstream-runtime/src/fs_runtime/store/fs_store.py b/python/functionstream-runtime/src/fs_runtime/store/fs_store.py new file mode 100644 index 00000000..332861df --- /dev/null +++ b/python/functionstream-runtime/src/fs_runtime/store/fs_store.py @@ -0,0 +1,131 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import TYPE_CHECKING, List, Optional + +from fs_api.store import ComplexKey, KvIterator, KvStore + +from .fs_complex_key import api_to_wit +from .fs_error import wit_to_api_error +from .fs_iterator import FSIterator + +if TYPE_CHECKING: + from wit_world.imports.kv import Store as WitStore +else: + try: + from wit_world.imports.kv import Store as WitStore + except (ImportError, AttributeError): + WitStore = None + + +class FSStore(KvStore): + + def __init__(self, name: str): + if WitStore is None: + raise RuntimeError("WIT Store binding is not available") + + self._store: WitStore = WitStore(name) + self._name = name + + def put_state(self, key: bytes, value: bytes) -> None: + try: + self._store.put_state(key, value) + except Exception as e: + api_error = wit_to_api_error(e) + raise api_error + + def get_state(self, key: bytes) -> Optional[bytes]: + try: + return self._store.get_state(key) + except Exception as e: + api_error = wit_to_api_error(e) + from fs_api.store.error import KvNotFoundError + if isinstance(api_error, KvNotFoundError): + return None + raise api_error + + def delete_state(self, key: bytes) -> None: + try: + self._store.delete_state(key) + except Exception as e: + api_error = wit_to_api_error(e) + raise api_error + + def list_states(self, start_inclusive: bytes, end_exclusive: bytes) -> List[bytes]: + try: + return self._store.list_states(start_inclusive, end_exclusive) + except Exception as e: + api_error = wit_to_api_error(e) + raise api_error + + def put(self, key: ComplexKey, value: bytes) -> None: + try: + wit_key = api_to_wit(key) + self._store.put(wit_key, value) + except Exception as e: + api_error = wit_to_api_error(e) + raise api_error + + def get(self, key: ComplexKey) -> Optional[bytes]: + try: + wit_key = api_to_wit(key) + return self._store.get(wit_key) + except Exception as e: + api_error = wit_to_api_error(e) + from fs_api.store.error import KvNotFoundError + if isinstance(api_error, KvNotFoundError): + return None + 
raise api_error + + def delete(self, key: ComplexKey) -> None: + try: + wit_key = api_to_wit(key) + self._store.delete(wit_key) + except Exception as e: + api_error = wit_to_api_error(e) + raise api_error + + def merge(self, key: ComplexKey, value: bytes) -> None: + try: + wit_key = api_to_wit(key) + self._store.merge(wit_key, value) + except Exception as e: + api_error = wit_to_api_error(e) + raise api_error + + def delete_prefix(self, key: ComplexKey) -> None: + try: + wit_key = api_to_wit(key) + self._store.delete_prefix(wit_key) + except Exception as e: + api_error = wit_to_api_error(e) + raise api_error + + def list_complex(self, key_group: bytes, key: bytes, namespace: bytes, + start_inclusive: bytes, end_exclusive: bytes) -> List[bytes]: + try: + return self._store.list_complex(key_group, key, namespace, start_inclusive, end_exclusive) + except Exception as e: + api_error = wit_to_api_error(e) + raise api_error + + def scan_complex(self, key_group: bytes, key: bytes, namespace: bytes) -> KvIterator: + try: + wit_iterator = self._store.scan_complex(key_group, key, namespace) + return FSIterator(wit_iterator) + except Exception as e: + api_error = wit_to_api_error(e) + raise api_error + + +__all__ = ['FSStore'] + diff --git a/sdks/fs-python/.gitignore b/sdks/fs-python/.gitignore deleted file mode 100644 index 330a94d3..00000000 --- a/sdks/fs-python/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -venv -*.egg-info -**/__pycache__ -build \ No newline at end of file diff --git a/sdks/fs-python/Dockerfile b/sdks/fs-python/Dockerfile deleted file mode 100644 index 48a4cf3e..00000000 --- a/sdks/fs-python/Dockerfile +++ /dev/null @@ -1,12 +0,0 @@ -FROM python:3.13-slim - -WORKDIR /function - -# Copy the SDK files -COPY . /function/ - -# Install the SDK -RUN pip install . 
- -# Set the default command -CMD ["python", "/function/function.py"] \ No newline at end of file diff --git a/sdks/fs-python/Makefile b/sdks/fs-python/Makefile deleted file mode 100644 index 1092e8d1..00000000 --- a/sdks/fs-python/Makefile +++ /dev/null @@ -1,10 +0,0 @@ -.PHONY: build-image - -build-image: - docker build -t functionstream/fs-python-base . - -docker-buildx: - docker buildx build --platform linux/amd64,linux/arm64 -t functionstream/fs-python-base . - -test: - PYTHONPATH=. python -m pytest \ No newline at end of file diff --git a/sdks/fs-python/README.md b/sdks/fs-python/README.md deleted file mode 100644 index a90d6983..00000000 --- a/sdks/fs-python/README.md +++ /dev/null @@ -1,197 +0,0 @@ - - -# FunctionStream Python SDK - -FunctionStream SDK is a powerful Python library for building and deploying serverless functions that process messages -from Apache Pulsar. It provides a simple yet flexible framework for creating event-driven applications with robust error -handling, metrics collection, and resource management. - -## Features - -- **Easy Function Development**: Simple API for creating serverless functions -- **Message Processing**: Built-in support for Apache Pulsar message processing -- **Metrics Collection**: Automatic collection of performance metrics -- **Resource Management**: Efficient handling of connections and resources -- **Configuration Management**: Flexible configuration through YAML files -- **Error Handling**: Comprehensive error handling and logging - -## Installation - -```bash -pip install function-stream -``` - -## Quick Start - -1. 
Create a function that processes messages: - -```python -from function_stream import FSFunction - -async def my_process_function(request_data: dict) -> dict: - # Process the request data - result = process_data(request_data) - return {"result": result} - -# Initialize and run the function -function = FSFunction( - process_funcs={ - 'my_module': my_process_function - } -) - -await function.start() -``` - -2. Create a configuration file (`config.yaml`): - -```yaml -pulsar: - service_url: "pulsar://localhost:6650" - authPlugin: "" # Optional - authParams: "" # Optional - -module: "my_module" -subscriptionName: "my-subscription" - -requestSource: - - pulsar: - topic: "input-topic" - -sink: - pulsar: - topic: "output-topic" -``` - -3. Define your function package (`package.yaml`): - -```yaml -name: my_function -type: pulsar -modules: - my_module: - name: my_process - description: "Process incoming messages" - inputSchema: - type: object - properties: - data: - type: string - required: - - data - outputSchema: - type: object - properties: - result: - type: string -``` - -## Core Components - -### FSFunction - -The main class for creating serverless functions. It handles: - -- Message consumption and processing -- Response generation -- Resource management -- Metrics collection -- Error handling - -### Configuration - -The SDK uses YAML configuration files to define: - -- Pulsar connection settings -- Module selection -- Topic subscriptions -- Input/output topics -- Custom configuration parameters - -### Metrics - -Built-in metrics collection for: - -- Request processing time -- Success/failure rates -- Message throughput -- Resource utilization - -## Examples - -Check out the `examples` directory for complete examples: - -- `string_function.py`: A simple string processing function -- `test_string_function.py`: Test client for the string function -- `config.yaml`: Example configuration -- `package.yaml`: Example package definition - -## Best Practices - -1. 
**Error Handling** - - Always handle exceptions in your process functions - - Use proper logging for debugging - - Implement graceful shutdown - -2. **Resource Management** - - Close resources properly - - Use context managers when possible - - Monitor resource usage - -3. **Configuration** - - Use environment variables for sensitive data - - Validate configuration values - - Document configuration options - -4. **Testing** - - Write unit tests for your functions - - Test error scenarios - - Validate input/output schemas - -## Development - -### Prerequisites - -- Python 3.7+ -- Apache Pulsar -- pip - -### Setup Development Environment - -```bash -# Create virtual environment -python -m venv venv -source venv/bin/activate # Linux/Mac -# or -.\venv\Scripts\activate # Windows - -# Install dependencies -pip install -r requirements.txt - -# Install the package in development mode -python -m pip install -e . -``` - -### Running Tests - -```bash -make test -``` - -## Support - -For support, please open an issue in the GitHub repository or contact the maintainers. \ No newline at end of file diff --git a/sdks/fs-python/examples/Dockerfile b/sdks/fs-python/examples/Dockerfile deleted file mode 100644 index 2497a3af..00000000 --- a/sdks/fs-python/examples/Dockerfile +++ /dev/null @@ -1,12 +0,0 @@ -FROM functionstream/fs-python-base:latest - -WORKDIR /function - -COPY requirements.txt . -RUN pip install -r requirements.txt - -COPY main.py . - -RUN chmod +x main.py - -CMD ["python", "main.py"] \ No newline at end of file diff --git a/sdks/fs-python/examples/Makefile b/sdks/fs-python/examples/Makefile deleted file mode 100644 index 05a1655b..00000000 --- a/sdks/fs-python/examples/Makefile +++ /dev/null @@ -1,5 +0,0 @@ - -build-image: - docker build -t my-function:latest . 
- -.DEFAULT_GOAL := build-image \ No newline at end of file diff --git a/sdks/fs-python/examples/config.yaml b/sdks/fs-python/examples/config.yaml deleted file mode 100644 index 12cd1095..00000000 --- a/sdks/fs-python/examples/config.yaml +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright 2024 Function Stream Org. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# FunctionStream Configuration File -# This configuration file defines the settings for the string processing function example. - -pulsar: - serviceUrl: "pulsar://127.0.0.1:6650" # Required: URL of the Pulsar broker - authPlugin: "" # Optional: Authentication plugin class name - authParams: "" # Optional: Authentication parameters - -module: "string" # Required: Name of the module to use for processing - -# Optional: List of source topics to consume from -# Note: Either sources or requestSource must be specified -sources: - - pulsar: # SourceSpec structure with pulsar configuration - topic: "topic-a" # Topic name for regular message consumption - -# Optional: request source -requestSource: - pulsar: # SourceSpec structure with pulsar configuration - topic: "string-topic" # Topic name for request messages - -# Required: Name of the subscription for the consumer -subscriptionName: "test-sub" - -# Optional: Output sink configuration -sink: - pulsar: # SinkSpec structure with pulsar configuration - topic: "output" # Topic name for output messages - -# Optional: Additional configuration parameters -config: - test: "Hello 
from config" # Example configuration value - test2: "Another config value" # Another example configuration value \ No newline at end of file diff --git a/sdks/fs-python/examples/main.py b/sdks/fs-python/examples/main.py deleted file mode 100644 index 5a67a039..00000000 --- a/sdks/fs-python/examples/main.py +++ /dev/null @@ -1,85 +0,0 @@ -""" -String Processing Function Example - -This module demonstrates a simple string processing function that appends an exclamation mark -to the input text. It serves as a basic example of how to create and run a FunctionStream -serverless function. - -The function: -1. Receives a request containing a text field -2. Appends an exclamation mark to the text -3. Returns the modified text in a response - -This example shows the basic structure of a FunctionStream function, including: -- Function definition and implementation -- FSFunction initialization -- Service startup and graceful shutdown -- Error handling -""" - -import asyncio -from typing import Dict, Any - -from function_stream import FSFunction, FSContext - -async def string_process_function(context: FSContext, data: Dict[str, Any]) -> Dict[str, Any]: - """ - Process a string by appending an exclamation mark. - - This function demonstrates a simple string transformation that can be used - as a building block for more complex text processing pipelines. - - Args: - data (Dict[str, Any]): Request data containing a 'text' field with the input string - - Returns: - Dict[str, Any]: Response containing the processed string with an exclamation mark appended - - Example: - Input: {"text": "Hello"} - Output: {"result": "Hello!"} - """ - # Extract the input text from the request data - text = data.get('text', '') - - # Append an exclamation mark to the text - result = f"{text}!" 
- - # Log the result for debugging purposes - print(f"Result: {result}") - print(f"Config: {context.get_config('test')}") - - return {"result": result} - -async def main(): - """ - Main function to initialize and run the string processing service. - - This function: - 1. Creates an FSFunction instance with the string processing function - 2. Starts the service - 3. Handles graceful shutdown and error cases - """ - # Initialize the FunctionStream function with our string processor - function = FSFunction( - process_funcs={ - 'string': string_process_function - } - ) - - try: - print("Starting string processing function service...") - await function.start() - except asyncio.CancelledError: - print("\nInitiating graceful shutdown...") - except Exception as e: - print(f"\nAn error occurred: {e}") - finally: - await function.close() - -if __name__ == "__main__": - try: - # Run the main function in an asyncio event loop - asyncio.run(main()) - except KeyboardInterrupt: - print("\nService stopped") \ No newline at end of file diff --git a/sdks/fs-python/examples/package.yaml b/sdks/fs-python/examples/package.yaml deleted file mode 100644 index 8602d3b1..00000000 --- a/sdks/fs-python/examples/package.yaml +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright 2024 Function Stream Org. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# FunctionStream Package Configuration -# This file defines the package metadata and function specifications for deployment. 
- -# Package name and type -name: my_function # Name of the function package -type: pulsar # Type of message broker to use - -# Module definitions -modules: - string: # Module name - name: string_process # Function name - description: "Appends an exclamation mark to the input string" # Function description - - # Input schema definition - inputSchema: - type: object - properties: - text: # Input parameter - type: string # Parameter type - required: - - text # Required parameter - - # Output schema definition - outputSchema: - type: object - properties: - result: # Output parameter - type: string # Parameter type \ No newline at end of file diff --git a/sdks/fs-python/examples/requirements.txt b/sdks/fs-python/examples/requirements.txt deleted file mode 100644 index 1e9cc773..00000000 --- a/sdks/fs-python/examples/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -# None. Please add your own deps here. \ No newline at end of file diff --git a/sdks/fs-python/examples/test_string_function.py b/sdks/fs-python/examples/test_string_function.py deleted file mode 100644 index 36c8bedc..00000000 --- a/sdks/fs-python/examples/test_string_function.py +++ /dev/null @@ -1,99 +0,0 @@ -""" -String Function Test Client - -This module provides a test client for the string processing function example. -It demonstrates how to: -1. Connect to a Pulsar broker -2. Send a request to the string processing function -3. Receive and process the response -4. Handle timeouts and errors -5. Clean up resources properly - -The test client: -- Creates a unique request ID for tracking -- Sends a test message to the string processing function -- Waits for and validates the response -- Implements proper error handling and resource cleanup -""" - -import asyncio -import pulsar -import json -import uuid - -async def test_string_function(): - """ - Test the string processing function by sending a request and waiting for a response. - - This function: - 1. Connects to the Pulsar broker - 2. 
Sets up a consumer for responses - 3. Creates a producer for sending requests - 4. Sends a test request with a unique ID - 5. Waits for and processes the response - 6. Cleans up all resources - - The test uses a 5-second timeout for receiving responses. - """ - # Create a Pulsar client connection - client = pulsar.Client('pulsar://localhost:6650') - - # Set up a consumer to receive responses - consumer = client.subscribe( - 'response-topic', # Response topic name - 'test-subscription', - consumer_type=pulsar.ConsumerType.Shared - ) - - # Create a producer to send requests - producer = client.create_producer('string-topic') # Request topic name - - try: - # Generate a unique request ID for tracking - request_id = str(uuid.uuid4()) - - # Prepare the test request data - request_data = { - 'text': 'Hello World' - } - - # Send the request with metadata - producer.send( - json.dumps(request_data).encode('utf-8'), - properties={ - 'request_id': request_id, - 'response_topic': 'response-topic' - } - ) - - print(f"Request sent, waiting for response...") - - # Wait for and process the response - while True: - try: - # Receive message with a 5-second timeout - msg = consumer.receive(timeout_millis=5000) - msg_props = msg.properties() - - # Verify if this is the response to our request - if msg_props.get('request_id') == request_id: - response_data = json.loads(msg.data().decode('utf-8')) - print(f"Received response: {response_data}") - consumer.acknowledge(msg) - break - else: - # If not our response, requeue the message - consumer.negative_acknowledge(msg) - except pulsar.Timeout: - print("Response timeout - no response received within 5 seconds") - break - - finally: - # Clean up resources in the correct order - producer.close() - consumer.close() - client.close() - -if __name__ == "__main__": - # Run the test function in an asyncio event loop - asyncio.run(test_string_function()) \ No newline at end of file diff --git a/sdks/fs-python/function_stream/__init__.py 
b/sdks/fs-python/function_stream/__init__.py deleted file mode 100644 index 163bc509..00000000 --- a/sdks/fs-python/function_stream/__init__.py +++ /dev/null @@ -1,27 +0,0 @@ -from .config import Config, PulsarConfig, PulsarSourceConfig, SourceSpec, SinkSpec, Metric -from .context import FSContext -from .function import FSFunction -from .metrics import Metrics, MetricsServer -from .module import FSModule - -__version__ = "0.6.0rc2" -__all__ = [ - # Core classes - "FSFunction", - "FSModule", - - # Configuration classes - "Config", - "PulsarConfig", - "PulsarSourceConfig", - "SourceSpec", - "SinkSpec", - "Metric", - - # Context and utilities - "FSContext", - - # Metrics and monitoring - "Metrics", - "MetricsServer" -] diff --git a/sdks/fs-python/function_stream/config.py b/sdks/fs-python/function_stream/config.py deleted file mode 100644 index a69cd3b6..00000000 --- a/sdks/fs-python/function_stream/config.py +++ /dev/null @@ -1,146 +0,0 @@ -import os -from typing import Dict, Any, Optional, List - -import yaml -from pydantic import BaseModel, Field - - -class PulsarConfig(BaseModel): - """ - Configuration for Pulsar connection settings. - - This class defines the connection parameters for connecting to a Pulsar cluster. - It includes authentication settings and performance tuning options. - """ - serviceUrl: str = "pulsar://localhost:6650" - """Pulsar service URL in format 'pulsar://host:port' or 'pulsar+ssl://host:port' for SSL""" - - authPlugin: str = "" - """Authentication plugin class name (e.g., 'org.apache.pulsar.client.impl.auth.AuthenticationTls')""" - - authParams: str = "" - """Authentication parameters in JSON format or key-value pairs""" - - max_concurrent_requests: int = 10 - """Maximum number of concurrent requests allowed for this connection""" - - -class PulsarSourceConfig(BaseModel): - """ - Configuration for Pulsar source/sink specific settings. 
- - This class defines topic-specific Pulsar configuration that can override - the global PulsarConfig settings for individual sources or sinks. - """ - topic: str - """Pulsar topic name to consume from or produce to""" - - -class SourceSpec(BaseModel): - """ - Specification for data sources. - - This class defines the configuration for input data sources. - Currently supports Pulsar as a source type. - """ - pulsar: Optional[PulsarSourceConfig] = None - """Pulsar source configuration (optional)""" - - -class SinkSpec(BaseModel): - """ - Specification for data sinks. - - This class defines the configuration for output data sinks. - Currently supports Pulsar as a sink type. - """ - pulsar: Optional[PulsarSourceConfig] = None - """Pulsar sink configuration (optional)""" - - -class Metric(BaseModel): - """ - Configuration for metrics and monitoring. - - This class defines settings for metrics collection and monitoring endpoints. - """ - port: Optional[int] = 9099 - """Port number for metrics endpoint (default: 9099)""" - - -class Config(BaseModel): - """ - Main configuration class for FunctionStream SDK. - - This is the root configuration class that contains all settings for the SDK, - including Pulsar connection, sources, sinks, metrics, and custom configuration. 
- """ - name: Optional[str] = None - """Function name identifier (optional)""" - - description: Optional[str] = None - """Function description (optional)""" - - pulsar: PulsarConfig = Field(default_factory=PulsarConfig) - """Pulsar connection configuration""" - - module: str = "default" - """Module name for the function (default: 'default')""" - - sources: List[SourceSpec] = Field(default_factory=list) - """List of input data sources""" - - requestSource: Optional[SourceSpec] = None - """Request source configuration for request-response pattern (optional)""" - - sink: Optional[SinkSpec] = None - """Output sink configuration (optional)""" - - subscriptionName: str = "function-stream-sdk-subscription" - """Pulsar subscription name for consuming messages""" - - metric: Metric = Field(default_factory=Metric) - """Metrics and monitoring configuration""" - - config: Dict[str, Any] = Field(default_factory=dict) - """Custom configuration key-value pairs for function-specific settings""" - - @classmethod - def from_yaml(cls, config_path: str = "config.yaml") -> "Config": - """ - Initialize configuration from YAML file. - - This method loads configuration from a YAML file and creates a Config instance. - The YAML file should contain configuration keys that match the Config class fields. - - Args: - config_path (str): Path to the configuration file (default: "config.yaml") - - Returns: - Config: Configuration instance loaded from the YAML file - - Raises: - FileNotFoundError: If the configuration file doesn't exist - yaml.YAMLError: If the YAML file is malformed - """ - if not os.path.exists(config_path): - raise FileNotFoundError(f"Configuration file not found: {config_path}") - - with open(config_path, 'r') as f: - config_data = yaml.safe_load(f) - return cls(**config_data) - - def get_config_value(self, config_name: str) -> Any: - """ - Get a configuration value by name from the config section. 
- - This method retrieves custom configuration values that were set in the - config dictionary. Useful for accessing function-specific settings. - - Args: - config_name (str): The name of the configuration to retrieve - - Returns: - Any: The configuration value, or None if not found - """ - return self.config.get(config_name) diff --git a/sdks/fs-python/function_stream/context.py b/sdks/fs-python/function_stream/context.py deleted file mode 100644 index f28c826f..00000000 --- a/sdks/fs-python/function_stream/context.py +++ /dev/null @@ -1,110 +0,0 @@ -""" -FSContext module provides a context object that manages configuration access for FunctionStream SDK. - -This module defines the FSContext class which serves as a wrapper around the Config object, -providing a clean interface for accessing configuration values and handling any potential -errors during access. It also provides methods for metadata access and data production. -""" - -import logging -from typing import Any, Dict -from datetime import datetime - -from .config import Config - -# Configure logging -logger = logging.getLogger(__name__) - - -class FSContext: - """ - Context class that provides access to configuration values and runtime context. - - This class serves as a wrapper around the Config object, providing a clean interface - for accessing configuration values and handling any potential errors during access. - It also provides methods for metadata access and data production capabilities. - - Attributes: - config (Config): The configuration object containing all settings. - function (FSFunction, optional): Reference to the parent FSFunction instance. - """ - - def __init__(self, config: Config): - """ - Initialize the FSContext with a configuration object. - - Args: - config (Config): The configuration object to be used by this context. - """ - self.config = config - - def get_config(self, config_name: str) -> Any: - """ - Get a configuration value by name. 
- - This method safely retrieves a configuration value, handling any potential - errors during the retrieval process. If an error occurs, it logs the error - and returns an empty string. - - Args: - config_name (str): The name of the configuration to retrieve. - - Returns: - Any: The configuration value if found, empty string if not found or error occurs. - """ - try: - return self.config.get_config_value(config_name) - except Exception as e: - logger.error(f"Error getting config {config_name}: {str(e)}") - return "" - - def get_metadata(self, key: str) -> Any: - """ - Get metadata value by key. - - This method retrieves metadata associated with the current message. - - Args: - key (str): The metadata key to retrieve. - - Returns: - Any: The metadata value, currently always None. - """ - return None - - def produce(self, data: Dict[str, Any], event_time: datetime = None) -> None: - """ - Produce data to the output stream. - - This method is intended to send processed data to the output stream. - - Args: - data (Dict[str, Any]): The data to produce. - event_time (datetime, optional): The timestamp for the event. Defaults to None. - - Returns: - None: Currently always returns None. - """ - return None - - def get_configs(self) -> Dict[str, Any]: - """ - Get all configuration values. - - Returns a dictionary containing all configuration key-value pairs. - - Returns: - Dict[str, Any]: A dictionary containing all configuration values. - """ - return self.config.config - - def get_module(self) -> str: - """ - Get the current module name. - - Returns the name of the module currently being executed. - - Returns: - str: The name of the current module. 
- """ - return self.config.module diff --git a/sdks/fs-python/function_stream/function.py b/sdks/fs-python/function_stream/function.py deleted file mode 100644 index 887db9a4..00000000 --- a/sdks/fs-python/function_stream/function.py +++ /dev/null @@ -1,687 +0,0 @@ -""" -FunctionStream SDK - A Python SDK for building and deploying serverless functions. - -This module provides the core functionality for creating and managing FunctionStream -functions. It handles message processing, request/response flow, and resource management. -The module includes classes for function execution, message handling, and Pulsar integration. -""" -import asyncio -import dataclasses -import functools -import inspect -import json -import logging -import os -import time -import typing -from datetime import datetime, timezone -from typing import Callable, Any, Dict, Set, Union, Awaitable, get_type_hints, List, Optional - -import pulsar -from pulsar import Client, Producer - -from .config import Config -from .context import FSContext -from .metrics import Metrics, MetricsServer -from .module import FSModule - -# Configure logging -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger(__name__) - - -def _validate_process_func(func: Callable, module_name: str): - """ - Validate the structure of a process function. - - This function performs comprehensive validation of a process function to ensure - it meets the requirements for FunctionStream processing. It checks parameter - count, types, and return types including support for async functions. 
- - Args: - func (Callable): The function to validate - module_name (str): Name of the module for error messages - - Raises: - ValueError: If the function structure is invalid, including: - - Incorrect number of parameters - - Missing type hints - - Invalid parameter types - - Invalid return types - """ - # Get function signature - sig = inspect.signature(func) - params = list(sig.parameters.values()) - - # Check number of parameters - if len(params) != 2: - raise ValueError( - f"Process function for module '{module_name}' must have exactly 2 parameters, " - f"got {len(params)}" - ) - - # Check parameter types using type hints - type_hints = get_type_hints(func) - if not ("context" in type_hints and "data" in type_hints and "return" in type_hints): - raise ValueError( - f"Process function for module '{module_name}' must have type hints for both parameters named 'context', 'data', and a return type" - ) - - def unwrap_annotated(annotation): - """Helper function to unwrap Annotated types.""" - origin = typing.get_origin(annotation) - if origin is typing.Annotated: - return unwrap_annotated(typing.get_args(annotation)[0]) - return annotation - - def is_dict_str_any(annotation): - """Check if annotation represents Dict[str, Any] or dict[str, Any].""" - ann = unwrap_annotated(annotation) - origin = typing.get_origin(ann) - args = typing.get_args(ann) - return (origin in (dict, typing.Dict)) and args == (str, Any) - - if not (type_hints["context"] == FSContext): - raise ValueError( - f"Process function for module '{module_name}' must have FSContext as first parameter" - ) - if not is_dict_str_any(type_hints["data"]): - raise ValueError( - f"Process function for module '{module_name}' must have Dict[str, Any] or dict[str, Any] as second parameter" - ) - # Check return type - return_type = type_hints.get('return') - - def is_dict_return(annotation): - """Check if annotation represents Dict[str, Any] or dict[str, Any].""" - ann = unwrap_annotated(annotation) - origin = 
typing.get_origin(ann) - args = typing.get_args(ann) - return (origin in (dict, typing.Dict)) and args == (str, Any) - - def is_none_type(annotation): - """Check if annotation represents None type.""" - ann = unwrap_annotated(annotation) - return ann is type(None) - - def is_awaitable_dict(annotation): - """Check if annotation represents Awaitable[Dict[str, Any]].""" - ann = unwrap_annotated(annotation) - origin = typing.get_origin(ann) - args = typing.get_args(ann) - return origin in (typing.Awaitable,) and len(args) == 1 and is_dict_return(args[0]) - - def is_awaitable_none(annotation): - """Check if annotation represents Awaitable[None].""" - ann = unwrap_annotated(annotation) - origin = typing.get_origin(ann) - args = typing.get_args(ann) - return origin in (typing.Awaitable,) and len(args) == 1 and is_none_type(args[0]) - - def is_union_of_dict_and_none(annotation): - """Check if annotation represents Union[Dict[str, Any], None].""" - ann = unwrap_annotated(annotation) - origin = typing.get_origin(ann) - args = typing.get_args(ann) - if origin in (typing.Union, Union): - return (any(is_dict_return(arg) for arg in args) and any(is_none_type(arg) for arg in args)) - return False - - def is_awaitable_union_dict_none(annotation): - """Check if annotation represents Awaitable[Union[Dict[str, Any], None]].""" - ann = unwrap_annotated(annotation) - origin = typing.get_origin(ann) - args = typing.get_args(ann) - if origin in (typing.Awaitable,): - if len(args) == 1: - return is_union_of_dict_and_none(args[0]) - return False - - if not ( - is_dict_return(return_type) - or is_awaitable_dict(return_type) - or is_none_type(return_type) - or is_awaitable_none(return_type) - or is_union_of_dict_and_none(return_type) - or is_awaitable_union_dict_none(return_type) - ): - raise ValueError( - f"Process function for module '{module_name}' must return Dict[str, Any], dict[str, Any], None, Awaitable thereof, or a Union with None, got {return_type}" - ) - - -@dataclasses.dataclass 
-class MsgWrapper: - """ - Wrapper class for message data and event timing. - - This class encapsulates message data along with its associated event timestamp, - providing a structured way to handle messages throughout the processing pipeline. - - Attributes: - data (Dict[str, Any]): The message data payload. - event_time (Optional[datetime]): The timestamp when the event occurred. - """ - data: Dict[str, Any] - event_time: Optional[datetime] = None - - -class FSFunction: - """ - FunctionStream Function - A serverless function handler for processing messages. - - This class provides a framework for building serverless functions that can process - messages from multiple Pulsar topics. It handles message consumption, processing, - and response generation, while managing resources and providing metrics. - - The FSFunction class is the main entry point for creating FunctionStream functions. - It manages the entire lifecycle of message processing, including: - - Pulsar client and consumer setup - - Message processing with configurable concurrency limits - - Response handling and error management - - Metrics collection and monitoring - - Graceful shutdown and resource cleanup - - Attributes: - config (Config): Configuration object containing function settings. - process_funcs (Dict[str, Union[Callable, FSModule]]): Dictionary of process functions or modules by module name. - client (Client): Pulsar client instance for message consumption and production. - semaphore (asyncio.Semaphore): Semaphore for controlling concurrent requests. - metrics (Metrics): Metrics collection object for monitoring function performance. - metrics_server (MetricsServer): Server for exposing metrics via HTTP endpoint. - context (FSContext): Context object for accessing configuration and runtime information. - _shutdown_event (asyncio.Event): Event flag for graceful shutdown coordination. - _current_tasks (Set[asyncio.Task]): Set of currently running processing tasks. 
- _tasks_lock (asyncio.Lock): Lock for thread-safe task management. - _consumer: Pulsar consumer for message consumption. - """ - - def __init__( - self, - process_funcs: Dict[ - str, Union[Callable[ - ["FSContext", Dict[str, Any]], Union[Dict[str, Any], Awaitable[Dict[str, Any]]]], FSModule]], - config_path: str = None - ): - """ - Initialize the FS Function. - - This method sets up the FunctionStream function with the provided process functions - and configuration. It performs validation of the module configuration and sets up - the Pulsar client, consumer, and other resources needed for message processing. - - Args: - process_funcs (Dict[str, Union[Callable, FSModule]]): Dictionary mapping module names to their process functions or modules. - Each function must accept two parameters: (context: FSContext, data: Dict[str, Any]) - and return either a Dict[str, Any] or an Awaitable[Dict[str, Any]]. - Each module must be an instance of FSModule. - config_path (str): Path to the configuration file. If None, uses FS_CONFIG_PATH environment variable or defaults to "config.yaml". - - Raises: - ValueError: If no module is specified in config or if the specified module - doesn't have a corresponding process function, or if the function - structure is invalid. - Exception: If there are errors during Pulsar client setup or consumer creation. 
- """ - if config_path is None: - config_path = os.getenv("FS_CONFIG_PATH", "config.yaml") - self.config = Config.from_yaml(config_path) - self.process_funcs = process_funcs - self.context = FSContext(self.config) - - # Validate module - module = self.config.module - if not module: - raise ValueError("No module specified in config") - if module not in process_funcs: - raise ValueError(f"Process function not found for module: {module}") - - # Validate function structure - process_func = process_funcs[module] - if isinstance(process_func, FSModule): - # For FSModule, we'll use its process method - process_func.init(self.context) - else: - _validate_process_func(process_func, module) - - # Create authentication if specified - auth = None - if self.config.pulsar.authPlugin: - auth = pulsar.Authentication( - self.config.pulsar.authPlugin, - self.config.pulsar.authParams - ) - - self.client = Client( - self.config.pulsar.serviceUrl, - authentication=auth, - operation_timeout_seconds=30 - ) - self.semaphore = asyncio.Semaphore(self.config.pulsar.max_concurrent_requests) - self.metrics = Metrics() - self.metrics_server = MetricsServer(self.metrics, port=self.config.metric.port) - self._shutdown_event = asyncio.Event() - self._current_tasks: Set[asyncio.Task] = set() - self._tasks_lock = asyncio.Lock() - self._consumer = None - - # Create multi-topics consumer - self._setup_consumer() - - def _setup_consumer(self): - """ - Set up a multi-topics consumer for all sources and the request source. - - This method creates a Pulsar consumer that subscribes to multiple topics - specified in the configuration. It collects topics from both regular sources - and the request source, creating a single consumer that can handle messages - from all configured topics. - - The consumer is configured with shared subscription type and appropriate - timeout settings for non-ordering guarantee workloads. - - Raises: - ValueError: If no subscription name is set or if no valid sources are found. 
- """ - topics = [] - subscription_name = self.config.subscriptionName - - if not subscription_name: - raise ValueError("subscriptionName is not set in config.yaml") - - # Collect topics from sources - for source in self.config.sources: - if source.pulsar and source.pulsar.topic: - topics.append(source.pulsar.topic) - logger.info(f"Added source topic: {source.pulsar.topic}") - - # Collect topics from request sources - if self.config.requestSource and self.config.requestSource.pulsar and self.config.requestSource.pulsar.topic: - topics.append(self.config.requestSource.pulsar.topic) - logger.info(f"Added request source topic: {self.config.requestSource.pulsar.topic}") - - if not topics: - raise ValueError("No valid sources or request sources found in config") - - # Create multi-topics consumer - self._consumer = self.client.subscribe( - topics, - subscription_name, - consumer_type=pulsar.ConsumerType.Shared, - unacked_messages_timeout_ms=30_000 # Only for non-ordering guarantee workload - ) - logger.info(f"Created multi-topics consumer for topics: {topics} with subscription: {subscription_name}") - - async def _add_task(self, task: asyncio.Task): - """ - Thread-safe method to add a task to the tracking set. - - This method safely adds a task to the internal tracking set using a lock - to ensure thread safety when multiple tasks are being processed concurrently. - - Args: - task (asyncio.Task): The task to add to tracking. - """ - async with self._tasks_lock: - self._current_tasks.add(task) - - async def _remove_task(self, task: asyncio.Task): - """ - Thread-safe method to remove a task from the tracking set. - - This method safely removes a task from the internal tracking set using a lock - to ensure thread safety. It handles any exceptions that might occur during - the removal process. - - Args: - task (asyncio.Task): The task to remove from tracking. 
- """ - async with self._tasks_lock: - try: - self._current_tasks.discard(task) - except Exception as e: - logger.error(f"Error removing task: {str(e)}") - - async def _get_tasks(self) -> Set[asyncio.Task]: - """ - Thread-safe method to get a copy of current tasks. - - This method returns a copy of the current tasks set to avoid race conditions - when iterating over the tasks. The copy is made while holding the lock. - - Returns: - Set[asyncio.Task]: A copy of the current tasks set. - """ - async with self._tasks_lock: - return set(self._current_tasks) - - @functools.lru_cache(maxsize=100) - def _get_producer(self, topic: str) -> Producer: - """ - Get a producer for the specified topic. - - This method uses an LRU cache to efficiently manage Pulsar producers. - Producers are cached by topic to avoid creating new ones for each message, - improving performance and resource utilization. - - Args: - topic (str): The topic to create a producer for. - - Returns: - Producer: A Pulsar producer for the specified topic. - """ - return self.client.create_producer(topic) - - async def process_request(self, message): - """ - Process an incoming request and send a response. - - This method is the core message processing function that: - 1. Records metrics for the request - 2. Processes the request using the configured module - 3. Sends the response back to the appropriate topic - 4. Handles any errors that occur during processing - 5. Manages message acknowledgment - - The method supports both synchronous and asynchronous process functions, - and handles various types of responses including error responses. - - Args: - message: The incoming Pulsar message to process. 
- """ - start_time = time.time() - self.metrics.record_request_start() - - task = asyncio.current_task() - await self._add_task(task) - - try: - async with self.semaphore: - if self._shutdown_event.is_set(): - logger.info("Skipping request processing due to shutdown") - return - - try: - request_data = json.loads(message.data().decode('utf-8')) - request_id = message.properties().get('request_id') - response_topic = message.properties().get('response_topic') - - # If no response_topic is provided, use the sink topic as default - if not response_topic and self.config.sink and self.config.sink.pulsar and self.config.sink.pulsar.topic: - response_topic = self.config.sink.pulsar.topic - - module = self.config.module - process_func = self.process_funcs[module] - - context = FSContext(self.config) - resp_msgs: List[MsgWrapper] = [] - - def produce(data: Dict[str, Any], event_time: datetime = None): - """Local produce function to collect response messages.""" - resp_msgs.append(MsgWrapper(data=data, event_time=event_time)) - - context.produce = produce - - def get_metadata(key: str) -> Any: - """Local metadata function to provide message metadata.""" - if key == "topic": - return message.topic_name() - if key == "message_id": - return message.message_id() - raise KeyError(key) - - context.get_metadata = get_metadata - - # Call the function with context as first argument and handle both sync and async results - response_data = None - try: - if isinstance(process_func, FSModule): - result = process_func.process(context, request_data) - else: - result = process_func(context, request_data) - - if result is not None: - if isinstance(result, Awaitable): - response_data = await result - else: - response_data = result - except Exception as e: - logger.error(f"Error invoking process function: {str(e)}") - raise Exception(f"Error invoking process function: {str(e)}") from e - if response_data: - resp_msgs.append(MsgWrapper(data=response_data, event_time=datetime.now(timezone.utc))) 
- - if not response_topic: - logger.warning("No response_topic provided and no sink topic available. Skip messages") - else: - await self._send_response(response_topic, request_id, resp_msgs) - - latency = time.time() - start_time - self.metrics.record_request_end(True, latency) - self.metrics.record_event(True) - - if request_id is None: - logger.info(f"Finished processing request and acknowledged {message.message_id()}") - self._consumer.acknowledge(message) - - except json.JSONDecodeError as e: - logger.error(f"Failed to decode request JSON: {e}") - self.metrics.record_request_end(False, time.time() - start_time) - self.metrics.record_event(False) - raise e - except asyncio.CancelledError as e: - logger.info("Request processing cancelled due to shutdown") - self.metrics.record_request_end(False, time.time() - start_time) - self.metrics.record_event(False) - raise e - except Exception as e: - logger.error(f"Error processing request: {type(e).__name__}: {e}") - if not self._shutdown_event.is_set(): - if request_id: # Only send the response back if the request_id exists - await self._send_response( - response_topic, - request_id, - [MsgWrapper(data={'error': str(e)}, event_time=datetime.now(timezone.utc))] - ) - self.metrics.record_request_end(False, time.time() - start_time) - self.metrics.record_event(False) - finally: - await self._remove_task(task) - if request_id: - self._consumer.acknowledge(message) - - async def _send_response(self, response_topic: str, request_id: str, msg: List[MsgWrapper]): - """ - Send a response message using cached producer asynchronously. - - This method sends response messages to the specified topic using the cached - Pulsar producer. It handles multiple messages in parallel and provides - proper error handling and logging for failed sends. - - The method converts datetime objects to ISO format strings for JSON serialization - and sets appropriate event timestamps for Pulsar messages. 
- - Args: - response_topic (str): The topic to send the response to. - request_id (str): The ID of the request being responded to. - msg (List[MsgWrapper]): The list of messages to send. - - Raises: - Exception: If there's an error sending the response. - """ - loop = asyncio.get_event_loop() - try: - producer = self._get_producer(response_topic) - - def default_serializer(o): - """Custom JSON serializer for datetime objects.""" - if isinstance(o, datetime): - return o.isoformat() - return str(o) - - send_futures = [] - for m in msg: - future = loop.create_future() - message_data = json.dumps(m.data, default=default_serializer).encode('utf-8') - - def create_callback(f): - """Create a callback function for async message sending.""" - def callback(res, msg_id): - if res != pulsar.Result.Ok: - err = Exception(f"Error producing: {res}") - logger.error(str(err)) - loop.call_soon_threadsafe(f.set_exception, err) - else: - loop.call_soon_threadsafe(f.set_result, msg_id) - - return callback - - event_timestamp = None - if m.event_time is not None: - # Convert datetime to milliseconds since epoch, with exact millisecond precision - event_timestamp = int( - m.event_time.replace(tzinfo=timezone.utc).timestamp()) * 1000 + m.event_time.microsecond // 1000 - send_kwargs = dict( - event_timestamp=event_timestamp - ) - if request_id is not None: - send_kwargs['properties'] = {'request_id': request_id} - producer.send_async( - message_data, - create_callback(future), - **send_kwargs - ) - send_futures.append(future) - await asyncio.gather(*send_futures, return_exceptions=True) - except Exception as e: - logger.error(f"Error sending response: {type(e).__name__}: {e}") - raise - - async def start(self): - """ - Start processing requests from all consumers. - - This method is the main entry point for starting the FunctionStream function. - It: - 1. Starts the metrics server for monitoring - 2. Enters a loop to process incoming messages - 3. 
Handles graceful shutdown when requested - 4. Manages the consumer receive loop with proper error handling - - The method runs indefinitely until a shutdown signal is received, either - through cancellation or keyboard interrupt. - """ - module = self.config.module - logger.info(f"Starting FS Function with module: {module}") - - await self.metrics_server.start() - - try: - while not self._shutdown_event.is_set(): - try: - msg = await asyncio.get_event_loop().run_in_executor( - None, lambda: self._consumer.receive(1000) - ) - if msg: - asyncio.create_task(self.process_request(msg)) - except pulsar.Timeout: - continue - except asyncio.CancelledError: - logger.info("Received cancellation signal, initiating shutdown...") - self._shutdown_event.set() - break - except Exception as e: - logger.error(f"Error in request processing loop: {str(e)}") - if not self._shutdown_event.is_set(): - await asyncio.sleep(1) - except KeyboardInterrupt: - logger.info("Received keyboard interrupt, initiating shutdown...") - self._shutdown_event.set() - finally: - logger.info("Request processing loop stopped") - await self.close() - - async def close(self): - """ - Close the service and clean up resources. - - This method performs a graceful shutdown of the FunctionStream function by: - 1. Stopping the metrics server - 2. Closing the Pulsar consumer - 3. Clearing the producer cache - 4. Closing the Pulsar client - - The method ensures that all resources are properly cleaned up and handles - any errors that might occur during the shutdown process. 
- """ - logger.info("Closing FS Function resources...") - - await self.metrics_server.stop() - - # Close consumer - if self._consumer is not None: - try: - self._consumer.close() - self._consumer = None - logger.info("Consumer closed successfully") - except Exception as e: - logger.error(f"Error closing consumer: {str(e)}") - - # Clear the producer cache - self._get_producer.cache_clear() - - # Close the Pulsar client - try: - await asyncio.sleep(0.1) - self.client.close() - logger.info("Pulsar client closed successfully") - except Exception as e: - if "AlreadyClosed" not in str(e): - logger.error(f"Error closing Pulsar client: {str(e)}") - - def __del__(self): - """ - Ensure resources are cleaned up when the object is destroyed. - - This finalizer ensures that all resources are properly closed when the - object is garbage collected. It provides a safety net for resource cleanup - in case the explicit close() method is not called. - """ - if self._consumer is not None: - try: - self._consumer.close() - except: - pass - try: - self._get_producer.cache_clear() - except: - pass - if self.client is not None: - try: - self.client.close() - except: - pass - - def get_metrics(self) -> Dict[str, Any]: - """ - Get current metrics for monitoring. - - This method returns the current metrics collected by the FunctionStream function, - providing insights into performance, throughput, and error rates. - - Returns: - Dict[str, Any]: A dictionary containing the current metrics. - """ - return self.metrics.get_metrics() - - def get_context(self) -> FSContext: - """ - Get the FSContext instance associated with this function. - - This method provides access to the context object that contains configuration - and runtime information for the function. - - Returns: - FSContext: The context object containing configuration and runtime information. 
- """ - return self.context diff --git a/sdks/fs-python/function_stream/metrics.py b/sdks/fs-python/function_stream/metrics.py deleted file mode 100644 index 8f35ffc5..00000000 --- a/sdks/fs-python/function_stream/metrics.py +++ /dev/null @@ -1,146 +0,0 @@ -import logging -import time -from typing import Dict, Any - -from aiohttp import web - -# Configure logging -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger(__name__) - - -class Metrics: - """ - Prometheus-style metrics for monitoring system performance. - - This class tracks various metrics including request counts, latencies, and event statistics. - All metrics are exposed in Prometheus-compatible format. - """ - - def __init__(self): - self.total_requests = 0 - self.active_requests = 0 - self.successful_requests = 0 - self.failed_requests = 0 - self.request_latency = 0.0 - self.last_request_time = 0.0 - self.total_events = 0 - self.successful_events = 0 - self.failed_events = 0 - - def record_request_start(self): - """ - Record the start of a new request. - - This method increments the total request counter and active request counter, - and updates the last request timestamp. - """ - self.total_requests += 1 - self.active_requests += 1 - self.last_request_time = time.time() - - def record_request_end(self, success: bool, latency: float): - """ - Record the end of a request. - - Args: - success (bool): Whether the request was successful. - latency (float): The request latency in seconds. - """ - self.active_requests -= 1 - if success: - self.successful_requests += 1 - else: - self.failed_requests += 1 - self.request_latency = latency - - def record_event(self, success: bool): - """ - Record an event (success or failure). - - Args: - success (bool): Whether the event was successful. - """ - self.total_events += 1 - if success: - self.successful_events += 1 - else: - self.failed_events += 1 - - def get_metrics(self) -> Dict[str, Any]: - """ - Get current metrics in Prometheus format. 
- - Returns: - Dict[str, Any]: A dictionary containing all metrics in Prometheus-compatible format. - Includes request counts, latencies, event statistics, and derived metrics - like success rates. - """ - return { - # Request metrics - 'fs_total_requests': self.total_requests, - 'fs_active_requests': self.active_requests, - 'fs_successful_requests': self.successful_requests, - 'fs_failed_requests': self.failed_requests, - 'fs_request_latency_seconds': self.request_latency, - 'fs_last_request_timestamp': self.last_request_time, - - # Event metrics - 'fs_total_events': self.total_events, - 'fs_successful_events': self.successful_events, - 'fs_failed_events': self.failed_events, - - # Derived metrics - 'fs_request_success_rate': ( - self.successful_requests / self.total_requests) if self.total_requests > 0 else 0, - 'fs_event_success_rate': (self.successful_events / self.total_events) if self.total_events > 0 else 0 - } - - -class MetricsServer: - def __init__(self, metrics: Metrics, host: str = '127.0.0.1', port: int = 9099): - self.metrics = metrics - self.host = host - self.port = port - self.app = web.Application() - self.app.router.add_get('/', self.handle_root) - self.app.router.add_get('/metrics', self.handle_metrics) - self.runner = None - - async def handle_root(self, request): - """Handle root endpoint request""" - return web.Response(text="FS SDK Metrics Server\nUse /metrics endpoint to get metrics data") - - async def handle_metrics(self, request): - """Handle metrics endpoint request""" - try: - metrics_data = self.metrics.get_metrics() - return web.json_response(metrics_data) - except Exception as e: - logger.error(f"Error getting metrics: {str(e)}") - return web.json_response( - {"error": "Failed to get metrics"}, - status=500 - ) - - async def start(self): - """Start the metrics server""" - try: - self.runner = web.AppRunner(self.app) - await self.runner.setup() - site = web.TCPSite(self.runner, self.host, self.port) - await site.start() - 
logger.info(f"Metrics server started at http://{self.host}:{self.port}/metrics") - except Exception as e: - logger.error(f"Failed to start metrics server: {str(e)}") - raise - - async def stop(self): - """Stop the metrics server""" - if self.runner: - try: - await self.runner.cleanup() - logger.info("Metrics server stopped") - except Exception as e: - logger.error(f"Error stopping metrics server: {str(e)}") - raise diff --git a/sdks/fs-python/function_stream/module.py b/sdks/fs-python/function_stream/module.py deleted file mode 100644 index 7c60d0cf..00000000 --- a/sdks/fs-python/function_stream/module.py +++ /dev/null @@ -1,65 +0,0 @@ -""" -FSModule module provides the base class for all FunctionStream modules. - -This module defines the abstract base class FSModule that all FunctionStream modules -must inherit from. It provides a common interface for module initialization and -data processing, ensuring consistency across different module implementations. -""" - -from abc import ABC, abstractmethod -from typing import Dict, Any - -from .context import FSContext - - -class FSModule(ABC): - """ - Base class for all FunctionStream modules. - - This abstract base class provides a common interface for all modules in the - FunctionStream SDK. Each module must implement the init and process methods - to handle module initialization and incoming data processing. - - Attributes: - name (str): The name of the module (to be set during initialization). - """ - - @abstractmethod - def init(self, context: FSContext): - """ - Initialize the module with the provided context. - - This method is called during module initialization to set up the module - with the necessary context and configuration. Subclasses must implement - this method to handle any required setup. - - Args: - context (FSContext): The context object containing configuration and - runtime information for the module. 
- """ - - @abstractmethod - async def process(self, context: FSContext, data: Dict[str, Any]) -> Dict[str, Any]: - """ - Process incoming data asynchronously. - - This method is the core processing function that handles incoming data. - Subclasses must implement this method to define the specific data processing - logic for their module. The method should be asynchronous to support - non-blocking operations. - - Args: - context (FSContext): The context object containing configuration and - runtime information. - data (Dict[str, Any]): The input data to process. This is typically - a dictionary containing the message payload - and any associated metadata. - - Returns: - Dict[str, Any]: The processed data that should be returned as the - result of the processing operation. - - Raises: - NotImplementedError: This method must be implemented by subclasses. - """ - raise NotImplementedError("Subclasses must implement process method") diff --git a/sdks/fs-python/pyproject.toml b/sdks/fs-python/pyproject.toml deleted file mode 100644 index 7d9d37bd..00000000 --- a/sdks/fs-python/pyproject.toml +++ /dev/null @@ -1,111 +0,0 @@ -[build-system] -requires = ["setuptools>=61.0", "wheel"] -build-backend = "setuptools.build_meta" - -[project] -name = "function-stream" -dynamic = ["version"] -description = "FunctionStream SDK is a powerful Python library for building and deploying serverless streaming functions that runs on Function Stream platform." 
-readme = "README.md" -license = { text = "Apache-2.0" } -authors = [ - { name = "FunctionStream Org" } -] -maintainers = [ - { name = "FunctionStream Org" } -] -keywords = ["serverless", "functions", "pulsar", "event-driven", "streaming"] -classifiers = [ - "Development Status :: 4 - Beta", - "Intended Audience :: Developers", - "License :: OSI Approved :: Apache Software License", - "Operating System :: OS Independent", - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.9", - "Programming Language :: Python :: 3.10", - "Programming Language :: Python :: 3.11", - "Programming Language :: Python :: 3.12", - "Topic :: Software Development :: Libraries :: Python Modules", - "Topic :: System :: Distributed Computing", - "Topic :: Internet :: WWW/HTTP :: HTTP Servers", - "Topic :: System :: Networking", -] -requires-python = ">=3.9" -dependencies = [ - "pulsar-client>=3.0.0", - "pyyaml>=6.0", - "aiohttp>=3.8.0", - "pydantic>=2.0.0", -] - -[project.optional-dependencies] -dev = [ - "pytest>=7.0.0", - "pytest-asyncio>=0.21.0", - "black>=22.0.0", - "flake8>=5.0.0", - "mypy>=1.0.0", - "pre-commit>=3.0.0", -] -test = [ - "pytest>=7.0.0", - "pytest-asyncio>=0.21.0", - "pytest-cov>=4.0.0", -] -docs = [ - "sphinx>=6.0.0", - "sphinx-rtd-theme>=1.0.0", - "myst-parser>=1.0.0", -] - -[project.urls] -Homepage = "https://github.com/functionstream/function-stream" -Documentation = "https://github.com/functionstream/function-stream/tree/main/sdks/fs-python" -Repository = "https://github.com/functionstream/function-stream" -"Bug Tracker" = "https://github.com/functionstream/function-stream/issues" -"Source Code" = "https://github.com/functionstream/function-stream" - -[tool.setuptools.dynamic] -version = { attr = "function_stream.__version__" } - -[tool.setuptools.packages.find] -where = ["."] -include = ["function_stream*"] -exclude = ["tests*", "examples*"] - -[tool.black] -line-length = 88 -target-version = ['py39'] -include = '\.pyi?$' -extend-exclude = 
''' -/( - # directories - \.eggs - | \.git - | \.hg - | \.mypy_cache - | \.tox - | \.venv - | build - | dist -)/ -''' - -[tool.isort] -profile = "black" -multi_line_output = 3 -line_length = 88 -known_first_party = ["function_stream"] - -[tool.pytest.ini_options] -minversion = "7.0" -addopts = "-ra -q --strict-markers --strict-config" -testpaths = ["tests"] -python_files = ["test_*.py", "*_test.py"] -python_classes = ["Test*"] -python_functions = ["test_*"] -markers = [ - "slow: marks tests as slow (deselect with '-m \"not slow\"')", - "integration: marks tests as integration tests", - "unit: marks tests as unit tests", -] \ No newline at end of file diff --git a/sdks/fs-python/pytest.ini b/sdks/fs-python/pytest.ini deleted file mode 100644 index 563e1e76..00000000 --- a/sdks/fs-python/pytest.ini +++ /dev/null @@ -1,10 +0,0 @@ -[pytest] -testpaths = tests -python_files = test_*.py -python_classes = Test* -python_functions = test_* -log_cli = true -log_cli_level = INFO -log_cli_format = %(asctime)s [%(levelname)8s] %(message)s (%(filename)s:%(lineno)s) -log_cli_date_format = %Y-%m-%d %H:%M:%S -asyncio_default_fixture_loop_scope = function \ No newline at end of file diff --git a/sdks/fs-python/requirements.txt b/sdks/fs-python/requirements.txt deleted file mode 100644 index 58608dc3..00000000 --- a/sdks/fs-python/requirements.txt +++ /dev/null @@ -1,5 +0,0 @@ -pulsar-client>=3.0.0 -pyyaml>=6.0 -aiohttp>=3.8.0 -pydantic>=2.0.0 -pytest-asyncio>=1.0.0 \ No newline at end of file diff --git a/sdks/fs-python/tests/conftest.py b/sdks/fs-python/tests/conftest.py deleted file mode 100644 index 96e27f54..00000000 --- a/sdks/fs-python/tests/conftest.py +++ /dev/null @@ -1,20 +0,0 @@ -""" -Shared test configurations and fixtures. 
-""" - -from unittest.mock import Mock - -import pytest - - -@pytest.fixture -def mock_pulsar_message(): - """Create a mock Pulsar message.""" - - def create_message(data, properties=None): - message = Mock() - message.data.return_value = data - message.properties.return_value = properties or {} - return message - - return create_message diff --git a/sdks/fs-python/tests/test_config.py b/sdks/fs-python/tests/test_config.py deleted file mode 100644 index f536f42b..00000000 --- a/sdks/fs-python/tests/test_config.py +++ /dev/null @@ -1,96 +0,0 @@ -""" -Unit tests for the Config class. -""" - -import pytest -import yaml - -from function_stream import Config - - -class TestConfig: - """Test suite for Config class.""" - - @pytest.fixture - def sample_config_yaml(self, tmp_path): - """Create a sample config.yaml file for testing.""" - config_data = { - "pulsar": { - "serviceUrl": "pulsar://localhost:6650", - "authPlugin": "", - "authParams": "", - "max_concurrent_requests": 10 - }, - "module": "test_module", - "sources": [ - { - "pulsar": { - "topic": "test_topic" - } - } - ], - "requestSource": { - "pulsar": { - "topic": "request_topic" - } - }, - "sink": { - "pulsar": { - "topic": "response_topic" - } - }, - "subscriptionName": "test_subscription", - "name": "test_function", - "description": "Test function", - "config": { - "test_key": "test_value" - } - } - - config_path = tmp_path / "config.yaml" - with open(config_path, 'w') as f: - yaml.dump(config_data, f) - return str(config_path) - - def test_from_yaml(self, sample_config_yaml): - """Test loading configuration from YAML file.""" - config = Config.from_yaml(sample_config_yaml) - - # Test Pulsar config - assert config.pulsar.serviceUrl == "pulsar://localhost:6650" - assert config.pulsar.authPlugin == "" - assert config.pulsar.authParams == "" - assert config.pulsar.max_concurrent_requests == 10 - - # Test module config - assert config.module == "test_module" - - # Test sources - assert len(config.sources) == 1 - 
assert config.sources[0].pulsar.topic == "test_topic" - - # Test request source - assert config.requestSource.pulsar.topic == "request_topic" - - # Test sink - assert config.sink.pulsar.topic == "response_topic" - - # Test subscription name - assert config.subscriptionName == "test_subscription" - - # Test name and description - assert config.name == "test_function" - assert config.description == "Test function" - - # Test config values - assert config.get_config_value("test_key") == "test_value" - - def test_from_yaml_file_not_found(self): - """Test loading configuration from non-existent file.""" - with pytest.raises(FileNotFoundError): - Config.from_yaml("non_existent.yaml") - - def test_get_config_value_not_found(self, sample_config_yaml): - """Test getting non-existent config value.""" - config = Config.from_yaml(sample_config_yaml) - assert config.get_config_value("non_existent_key") is None diff --git a/sdks/fs-python/tests/test_context.py b/sdks/fs-python/tests/test_context.py deleted file mode 100644 index fc8293cd..00000000 --- a/sdks/fs-python/tests/test_context.py +++ /dev/null @@ -1,110 +0,0 @@ -""" -Unit tests for the FSContext class. 
-""" - -from unittest.mock import Mock -from datetime import datetime, timezone - -import pytest - -from function_stream import FSContext, Config - - -class TestFSContext: - """Test suite for FSContext class.""" - - @pytest.fixture - def mock_config(self): - """Create a mock Config object for testing.""" - config = Mock(spec=Config) - return config - - @pytest.fixture - def context(self, mock_config): - """Create a FSContext instance with mock config.""" - return FSContext(mock_config) - - def test_init(self, mock_config): - """Test FSContext initialization.""" - context = FSContext(mock_config) - assert context.config == mock_config - - def test_get_config_success(self, context, mock_config): - """Test successful config value retrieval.""" - # Setup - mock_config.get_config_value.return_value = "test_value" - - # Execute - result = context.get_config("test_key") - - # Verify - mock_config.get_config_value.assert_called_once_with("test_key") - assert result == "test_value" - - def test_get_config_error(self, context, mock_config): - """Test config value retrieval with error.""" - # Setup - mock_config.get_config_value.side_effect = Exception("Test error") - - # Execute - result = context.get_config("test_key") - - # Verify - mock_config.get_config_value.assert_called_once_with("test_key") - assert result == "" - - def test_get_config_non_string_value(self, context, mock_config): - """Test config value retrieval with non-string value.""" - # Setup - mock_config.get_config_value.return_value = 123 - - # Execute - result = context.get_config("test_key") - - # Verify - mock_config.get_config_value.assert_called_once_with("test_key") - assert result == 123 - - def test_get_metadata_default_implementation(self, context): - """Test that get_metadata returns None by default.""" - result = context.get_metadata("any_key") - assert result is None - - def test_produce_default_implementation(self, context): - """Test that produce does nothing by default.""" - test_data = 
{"key": "value"} - test_time = datetime.now(timezone.utc) - - # Should not raise any exception - result = context.produce(test_data, test_time) - assert result is None - - def test_produce_without_event_time(self, context): - """Test produce method without event_time parameter.""" - test_data = {"key": "value"} - - # Should not raise any exception - result = context.produce(test_data) - assert result is None - - def test_get_configs(self, context, mock_config): - """Test get_configs method.""" - # Setup - mock_config.config = {"key1": "value1", "key2": "value2"} - - # Execute - result = context.get_configs() - - # Verify - assert result == {"key1": "value1", "key2": "value2"} - - def test_get_module(self, context, mock_config): - """Test get_module method.""" - # Setup - mock_config.module = "test_module" - - # Execute - result = context.get_module() - - # Verify - assert result == "test_module" diff --git a/sdks/fs-python/tests/test_function.py b/sdks/fs-python/tests/test_function.py deleted file mode 100644 index 3cb19468..00000000 --- a/sdks/fs-python/tests/test_function.py +++ /dev/null @@ -1,334 +0,0 @@ -""" -Unit tests for the FSFunction class. 
-""" - -import asyncio -import inspect -import json -from typing import Dict, Any -from unittest.mock import Mock, patch, AsyncMock - -import pulsar -import pytest - -from function_stream import ( - FSFunction, - Config, - PulsarConfig, - SinkSpec, - SourceSpec, - PulsarSourceConfig, - Metrics, - MetricsServer, - FSContext -) -from function_stream.function import MsgWrapper - - -class TestFSFunction: - """Test suite for FSFunction class.""" - - @pytest.fixture - def mock_config(self): - """Create a mock Config object for testing.""" - config = Mock(spec=Config) - config.module = "test_module" - config.subscriptionName = "test_subscription" - config.pulsar = PulsarConfig( - serviceUrl="pulsar://localhost:6650", - authPlugin="", - authParams="", - max_concurrent_requests=10 - ) - config.sources = [SourceSpec(pulsar=PulsarSourceConfig(topic="test_topic"))] - config.requestSource = SourceSpec(pulsar=PulsarSourceConfig(topic="request_topic")) - config.sink = SinkSpec(pulsar=PulsarSourceConfig(topic="response_topic")) - - metric_mock = Mock() - metric_mock.port = 8080 - config.metric = metric_mock - - return config - - @pytest.fixture - def mock_client(self): - """Create a mock Pulsar client.""" - client = Mock() - return client - - @pytest.fixture - def mock_consumer(self): - """Create a mock Pulsar consumer.""" - consumer = Mock() - return consumer - - @pytest.fixture - def mock_producer(self): - """Create a mock Pulsar producer.""" - producer = Mock() - - # Mock send_async to properly handle callbacks - def mock_send_async(data, callback, **kwargs): - # Simulate successful send by calling the callback with Ok result - callback(pulsar.Result.Ok, "mock_message_id") - - producer.send_async = mock_send_async - producer.send = Mock() - - return producer - - @pytest.fixture - def mock_metrics(self): - """Create a mock Metrics object.""" - metrics = Mock(spec=Metrics) - return metrics - - @pytest.fixture - def mock_metrics_server(self): - """Create a mock MetricsServer 
object.""" - metrics_server = Mock(spec=MetricsServer) - metrics_server.start = AsyncMock() - metrics_server.stop = AsyncMock() - return metrics_server - - @pytest.fixture - def function(self, mock_config, mock_client, mock_consumer, - mock_producer, mock_metrics, mock_metrics_server): - """Create a FSFunction instance with mocks, patching Config to avoid file IO.""" - with patch('function_stream.function.Config.from_yaml', return_value=mock_config), \ - patch('function_stream.function.Client', return_value=mock_client), \ - patch('function_stream.function.Metrics', return_value=mock_metrics), \ - patch('function_stream.function.MetricsServer', return_value=mock_metrics_server): - mock_client.subscribe.return_value = mock_consumer - mock_client.create_producer.return_value = mock_producer - - async def process_func(context: FSContext, data: Dict[str, Any]) -> Dict[str, Any]: - return {"processed": data} - - process_funcs = {"test_module": process_func} - return FSFunction( - process_funcs=process_funcs, - config_path="test_config.yaml" - ) - - @pytest.mark.asyncio - async def test_init(self): - """Test FSFunction initialization.""" - with patch('function_stream.function.Config.from_yaml') as mock_from_yaml, \ - patch('function_stream.function.Client') as mock_client: - mock_config = Mock(spec=Config) - mock_config.module = "test_module" - mock_config.subscriptionName = "test_subscription" - mock_config.pulsar = PulsarConfig( - serviceUrl="pulsar://localhost:6650", - authPlugin="", - authParams="", - max_concurrent_requests=10 - ) - mock_config.sources = [SourceSpec(pulsar=PulsarSourceConfig(topic="test_topic"))] - mock_config.requestSource = SourceSpec(pulsar=PulsarSourceConfig(topic="request_topic")) - mock_config.sink = SinkSpec(pulsar=PulsarSourceConfig(topic="response_topic")) - - metric_mock = Mock() - metric_mock.port = 8080 - mock_config.metric = metric_mock - - mock_from_yaml.return_value = mock_config - mock_client.return_value.subscribe.return_value = 
Mock() - mock_client.return_value.create_producer.return_value = Mock() - - async def process_func(context: FSContext, data: Dict[str, Any]) -> Dict[str, Any]: - return {"processed": data} - - process_funcs = {"test_module": process_func} - function = FSFunction( - process_funcs=process_funcs, - config_path="test_config.yaml" - ) - sig = inspect.signature(function.process_funcs["test_module"]) - assert list(sig.parameters.keys()) == ["context", "data"] - - @pytest.mark.asyncio - async def test_process_request_success(self, function): - """Test successful request processing.""" - message = Mock() - message.data.return_value = json.dumps({"test": "data"}).encode('utf-8') - message.properties.return_value = { - "request_id": "test_id", - "response_topic": "response_topic" - } - message.message_id.return_value = "test_message_id" - - # Mock the consumer acknowledge method - function._consumer.acknowledge = Mock() - - await function.process_request(message) - - # Verify that the message was processed successfully by checking - # that the consumer acknowledge was called - function._consumer.acknowledge.assert_called_once_with(message) - - @pytest.mark.asyncio - async def test_process_request_with_metadata_access(self, function): - """Test request processing with metadata access through context.""" - message = Mock() - message.data.return_value = json.dumps({"test": "data"}).encode('utf-8') - message.properties.return_value = { - "request_id": "test_id", - "response_topic": "response_topic" - } - message.message_id.return_value = "test_message_id" - message.topic_name.return_value = "test_topic" - - # Mock the consumer acknowledge method - function._consumer.acknowledge = Mock() - - # Create a process function that accesses metadata - async def process_func_with_metadata(context: FSContext, data: Dict[str, Any]) -> Dict[str, Any]: - topic = context.get_metadata("topic") - message_id = context.get_metadata("message_id") - return { - "processed": data, - "metadata": { - 
"topic": topic, - "message_id": message_id - } - } - - function.process_funcs["test_module"] = process_func_with_metadata - - await function.process_request(message) - - # Verify that the message was processed successfully - function._consumer.acknowledge.assert_called_once_with(message) - - @pytest.mark.asyncio - async def test_process_request_metadata_invalid_key(self, function): - """Test request processing with invalid metadata key access.""" - message = Mock() - message.data.return_value = json.dumps({"test": "data"}).encode('utf-8') - message.properties.return_value = { - "request_id": "test_id", - "response_topic": "response_topic" - } - message.message_id.return_value = "test_message_id" - message.topic_name.return_value = "test_topic" - - # Mock the consumer acknowledge method - function._consumer.acknowledge = Mock() - - # Create a process function that accesses invalid metadata - async def process_func_with_invalid_metadata(context: FSContext, data: Dict[str, Any]) -> Dict[str, Any]: - try: - context.get_metadata("invalid_key") - return {"error": "Should have raised KeyError"} - except KeyError: - return {"error": "KeyError raised as expected"} - - function.process_funcs["test_module"] = process_func_with_invalid_metadata - - await function.process_request(message) - - # Verify that the message was processed successfully - function._consumer.acknowledge.assert_called_once_with(message) - - @pytest.mark.asyncio - async def test_process_request_json_error(self, function, mock_metrics): - """Test request processing with JSON decode error.""" - message = Mock() - message.data.return_value = b"invalid json" - message.properties.return_value = {"request_id": "test_id"} - message.message_id.return_value = "test_message_id" - - # Mock the consumer acknowledge method - function._consumer.acknowledge = Mock() - - # The function has a bug where it tries to access request_id in finally block - # even when JSON parsing fails, so we expect an UnboundLocalError - with 
pytest.raises(UnboundLocalError): - await function.process_request(message) - - @pytest.mark.asyncio - async def test_process_request_no_response_topic(self, function, mock_metrics): - """Test request processing with no response topic.""" - message = Mock() - message.data.return_value = json.dumps({"test": "data"}).encode('utf-8') - message.properties.return_value = {"request_id": "test_id"} - message.message_id.return_value = "test_message_id" - function.config.sink = None - - # Mock the consumer acknowledge method - function._consumer.acknowledge = Mock() - - await function.process_request(message) - # The function processes successfully but skips sending response due to no topic - # So it should record success, not failure - mock_metrics.record_event.assert_called_with(True) - function._consumer.acknowledge.assert_called_once_with(message) - - @pytest.mark.asyncio - async def test_start_and_shutdown(self, function, mock_consumer, mock_metrics_server): - """Test function start and graceful shutdown.""" - mock_consumer.receive.side_effect = [ - Mock(data=lambda: json.dumps({"test": "data"}).encode('utf-8'), - properties=lambda: {"request_id": "test_id", "response_topic": "response_topic"}), - asyncio.CancelledError() - ] - try: - await function.start() - except asyncio.CancelledError: - pass - mock_metrics_server.start.assert_called_once() - mock_metrics_server.stop.assert_called_once() - - def test_get_metrics(self, function, mock_metrics): - """Test metrics retrieval.""" - mock_metrics.get_metrics.return_value = {"test": "metrics"} - result = function.get_metrics() - mock_metrics.get_metrics.assert_called_once() - assert result == {"test": "metrics"} - - def test_get_context(self, function, mock_config): - """Test context retrieval.""" - context = function.get_context() - assert context is not None - assert context.config == mock_config - - @pytest.mark.asyncio - async def test_send_response(self, function): - """Test response sending.""" - response_topic = 
"test_topic" - request_id = "test_id" - response_data = {"result": "test"} - - # Create MsgWrapper objects as expected by _send_response - msg_wrappers = [MsgWrapper(data=response_data)] - - # This should not raise an exception - await function._send_response(response_topic, request_id, msg_wrappers) - - # The test passes if no exception is raised - assert True - - @pytest.mark.asyncio - async def test_send_response_error(self, function): - """Test response sending with error.""" - response_topic = "test_topic" - request_id = "test_id" - response_data = {"test": "data"} - - # Create MsgWrapper objects as expected by _send_response - msg_wrappers = [MsgWrapper(data=response_data)] - - # Clear the cache and get the producer - function._get_producer.cache_clear() - producer = function._get_producer(response_topic) - - # Mock send_async to raise an exception - def mock_send_async_with_error(data, callback, **kwargs): - raise Exception("Send error") - - producer.send_async = mock_send_async_with_error - - with pytest.raises(Exception, match="Send error"): - await function._send_response(response_topic, request_id, msg_wrappers) diff --git a/sdks/fs-python/tests/test_metrics.py b/sdks/fs-python/tests/test_metrics.py deleted file mode 100644 index 85a26a2f..00000000 --- a/sdks/fs-python/tests/test_metrics.py +++ /dev/null @@ -1,159 +0,0 @@ -""" -Unit tests for the Metrics and MetricsServer classes. 
-""" - -import json - -import pytest -from aiohttp.test_utils import make_mocked_request - -from function_stream import Metrics, MetricsServer - - -@pytest.fixture -def metrics(): - return Metrics() - - -@pytest.fixture -def metrics_server(metrics): - return MetricsServer(metrics, host='127.0.0.1', port=9099) - - -class TestMetrics: - def test_initial_state(self, metrics): - """Test initial state of metrics""" - assert metrics.total_requests == 0 - assert metrics.active_requests == 0 - assert metrics.successful_requests == 0 - assert metrics.failed_requests == 0 - assert metrics.request_latency == 0.0 - assert metrics.last_request_time == 0.0 - assert metrics.total_events == 0 - assert metrics.successful_events == 0 - assert metrics.failed_events == 0 - - def test_record_request_start(self, metrics): - """Test recording request start""" - metrics.record_request_start() - assert metrics.total_requests == 1 - assert metrics.active_requests == 1 - assert metrics.last_request_time > 0 - - def test_record_request_end_success(self, metrics): - """Test recording successful request end""" - metrics.record_request_start() - metrics.record_request_end(success=True, latency=0.5) - assert metrics.active_requests == 0 - assert metrics.successful_requests == 1 - assert metrics.failed_requests == 0 - assert metrics.request_latency == 0.5 - - def test_record_request_end_failure(self, metrics): - """Test recording failed request end""" - metrics.record_request_start() - metrics.record_request_end(success=False, latency=0.5) - assert metrics.active_requests == 0 - assert metrics.successful_requests == 0 - assert metrics.failed_requests == 1 - assert metrics.request_latency == 0.5 - - def test_record_event_success(self, metrics): - """Test recording successful event""" - metrics.record_event(success=True) - assert metrics.total_events == 1 - assert metrics.successful_events == 1 - assert metrics.failed_events == 0 - - def test_record_event_failure(self, metrics): - """Test recording 
failed event""" - metrics.record_event(success=False) - assert metrics.total_events == 1 - assert metrics.successful_events == 0 - assert metrics.failed_events == 1 - - def test_get_metrics_empty(self, metrics): - """Test getting metrics when no data has been recorded""" - metrics_data = metrics.get_metrics() - assert metrics_data['fs_total_requests'] == 0 - assert metrics_data['fs_active_requests'] == 0 - assert metrics_data['fs_successful_requests'] == 0 - assert metrics_data['fs_failed_requests'] == 0 - assert metrics_data['fs_request_latency_seconds'] == 0.0 - assert metrics_data['fs_total_events'] == 0 - assert metrics_data['fs_successful_events'] == 0 - assert metrics_data['fs_failed_events'] == 0 - assert metrics_data['fs_request_success_rate'] == 0 - assert metrics_data['fs_event_success_rate'] == 0 - - def test_get_metrics_with_data(self, metrics): - """Test getting metrics with recorded data""" - # Record some requests - metrics.record_request_start() - metrics.record_request_end(success=True, latency=0.5) - metrics.record_request_start() - metrics.record_request_end(success=False, latency=0.3) - - # Record some events - metrics.record_event(success=True) - metrics.record_event(success=True) - metrics.record_event(success=False) - - metrics_data = metrics.get_metrics() - assert metrics_data['fs_total_requests'] == 2 - assert metrics_data['fs_active_requests'] == 0 - assert metrics_data['fs_successful_requests'] == 1 - assert metrics_data['fs_failed_requests'] == 1 - assert metrics_data['fs_request_latency_seconds'] == 0.3 - assert metrics_data['fs_total_events'] == 3 - assert metrics_data['fs_successful_events'] == 2 - assert metrics_data['fs_failed_events'] == 1 - assert metrics_data['fs_request_success_rate'] == 0.5 - assert metrics_data['fs_event_success_rate'] == 2 / 3 - - -@pytest.mark.asyncio -class TestMetricsServer: - async def test_handle_root(self, metrics_server): - """Test root endpoint handler""" - request = make_mocked_request('GET', '/') - 
response = await metrics_server.handle_root(request) - assert response.status == 200 - text = response.text - assert "FS SDK Metrics Server" in text - - async def test_handle_metrics_empty(self, metrics_server): - """Test metrics endpoint with no data""" - request = make_mocked_request('GET', '/metrics') - response = await metrics_server.handle_metrics(request) - assert response.status == 200 - data = json.loads(response.text) - assert data['fs_total_requests'] == 0 - assert data['fs_active_requests'] == 0 - - async def test_handle_metrics_with_data(self, metrics_server): - """Test metrics endpoint with recorded data""" - # Record some data - metrics_server.metrics.record_request_start() - metrics_server.metrics.record_request_end(success=True, latency=0.5) - metrics_server.metrics.record_event(success=True) - - request = make_mocked_request('GET', '/metrics') - response = await metrics_server.handle_metrics(request) - assert response.status == 200 - data = json.loads(response.text) - assert data['fs_total_requests'] == 1 - assert data['fs_successful_requests'] == 1 - assert data['fs_total_events'] == 1 - assert data['fs_successful_events'] == 1 - - async def test_server_start_stop(self, metrics_server): - """Test starting and stopping the metrics server""" - # Start server - await metrics_server.start() - assert metrics_server.runner is not None - - # Stop server - await metrics_server.stop() - # Note: runner is not set to None after cleanup in aiohttp - # We just verify that the server was started and stopped successfully diff --git a/server/config.go b/server/config.go deleted file mode 100644 index c86fff42..00000000 --- a/server/config.go +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package server - -import ( - "fmt" - "os" - "strings" - - "github.com/functionstream/function-stream/common/config" - - "github.com/go-playground/validator/v10" - - "github.com/spf13/viper" -) - -type FactoryConfig struct { - // Deprecate - Ref *string `mapstructure:"ref"` - Type *string `mapstructure:"type"` - Config *config.ConfigMap `mapstructure:"config"` -} - -type StateStoreConfig struct { - Type *string `mapstructure:"type"` - Config config.ConfigMap `mapstructure:"config"` -} - -type QueueConfig struct { - Type string `mapstructure:"type"` - Config config.ConfigMap `mapstructure:"config"` -} - -type Config struct { - // ListenAddr is the address that the function stream REST service will listen on. - ListenAddr string `mapstructure:"listen-addr"` - - Queue QueueConfig `mapstructure:"queue"` - - TubeConfig map[string]config.ConfigMap `mapstructure:"tube-config"` - - RuntimeConfig map[string]config.ConfigMap `mapstructure:"runtime-config"` - - // StateStore is the configuration for the state store that the function stream server will use. 
- // Optional - StateStore *StateStoreConfig `mapstructure:"state-store"` - - // FunctionStore is the path to the function store - FunctionStore string `mapstructure:"function-store"` - - EnableTLS bool `mapstructure:"enable-tls"` - TLSCertFile string `mapstructure:"tls-cert-file"` - TLSKeyFile string `mapstructure:"tls-key-file"` -} - -func init() { - viper.SetDefault("listen-addr", ":7300") - viper.SetDefault("function-store", "./functions") -} - -func (c *Config) PreprocessConfig() error { - if c.ListenAddr == "" { - return fmt.Errorf("ListenAddr shouldn't be empty") - } - validate := validator.New() - if err := validate.Struct(c); err != nil { - return err - } - return nil -} - -func loadConfig() (*Config, error) { - var c Config - if err := viper.Unmarshal(&c); err != nil { - return nil, err - } - if err := c.PreprocessConfig(); err != nil { - return nil, err - } - return &c, nil -} - -const envPrefix = "FS_" - -func LoadConfigFromFile(filePath string) (*Config, error) { - viper.SetConfigFile(filePath) - if err := viper.ReadInConfig(); err != nil { - return nil, err - } - return loadConfig() -} - -func LoadConfigFromEnv() (*Config, error) { - for _, env := range os.Environ() { - if strings.HasPrefix(env, envPrefix) { - parts := strings.SplitN(strings.TrimPrefix(env, envPrefix), "=", 2) - key := parts[0] - value := parts[1] - - key = strings.Replace(key, "__", ".", -1) - key = strings.Replace(key, "_", "-", -1) - viper.Set(key, value) - } - } - - return loadConfig() -} diff --git a/server/config_test.go b/server/config_test.go deleted file mode 100644 index 821ece59..00000000 --- a/server/config_test.go +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package server - -import ( - "os" - "testing" - - "github.com/spf13/viper" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestLoadConfigFromYaml(t *testing.T) { - c, err := LoadConfigFromFile("../tests/test_config.yaml") - require.Nil(t, err) - assertConfig(t, c) -} - -func TestLoadConfigFromJson(t *testing.T) { - c, err := LoadConfigFromFile("../tests/test_config.json") - require.Nil(t, err) - assertConfig(t, c) -} - -func TestLoadConfigFromEnv(t *testing.T) { - assert.Nil(t, os.Setenv("FS_LISTEN_ADDR", ":17300")) - assert.Nil(t, os.Setenv("FS_TUBE_CONFIG__MY_TUBE__KEY", "value")) - assert.Nil(t, os.Setenv("FS_RUNTIME_CONFIG__CUSTOM_RUNTIME__NAME", "test")) - - viper.AutomaticEnv() - - c, err := LoadConfigFromEnv() - require.Nil(t, err) - assertConfig(t, c) -} - -func assertConfig(t *testing.T, c *Config) { - assert.Equal(t, ":17300", c.ListenAddr) - require.Contains(t, c.TubeConfig, "my-tube") - assert.Equal(t, "value", c.TubeConfig["my-tube"]["key"]) - - require.Contains(t, c.RuntimeConfig, "custom-runtime") - assert.Equal(t, "test", c.RuntimeConfig["custom-runtime"]["name"]) -} diff --git a/server/function_service.go b/server/function_service.go deleted file mode 100644 index 4168103b..00000000 --- a/server/function_service.go +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package server - -import ( - "errors" - "net/http" - - restfulspec "github.com/emicklei/go-restful-openapi/v2" - "github.com/emicklei/go-restful/v3" - "github.com/functionstream/function-stream/common" - "github.com/functionstream/function-stream/common/model" -) - -func (s *Server) makeFunctionService() *restful.WebService { - ws := new(restful.WebService) - ws.Path("/api/v1/function"). - Consumes(restful.MIME_JSON). - Produces(restful.MIME_JSON) - - tags := []string{"function"} - - ws.Route(ws.GET("/"). - To(func(request *restful.Request, response *restful.Response) { - functions := s.Manager.ListFunctions() - s.handleRestError(response.WriteEntity(functions)) - }). - Doc("get all functions"). - Metadata(restfulspec.KeyOpenAPITags, tags). - Operation("getAllFunctions"). - Returns(http.StatusOK, "OK", []string{}). - Writes([]string{})) - - ws.Route(ws.POST("/"). - To(func(request *restful.Request, response *restful.Response) { - function := model.Function{} - err := request.ReadEntity(&function) - if err != nil { - s.handleRestError(response.WriteError(http.StatusBadRequest, err)) - return - } - err = s.Manager.StartFunction(&function) - if err != nil { - if errors.Is(err, common.ErrorFunctionExists) { - s.handleRestError(response.WriteError(http.StatusConflict, err)) - return - } - s.handleRestError(response.WriteError(http.StatusBadRequest, err)) - return - } - response.WriteHeader(http.StatusOK) - }). - Doc("create a function"). - Metadata(restfulspec.KeyOpenAPITags, tags). - Operation("createFunction"). 
- Reads(model.Function{})) - - deleteFunctionHandler := func(response *restful.Response, namespace, name string) { - err := s.Manager.DeleteFunction(namespace, name) - if err != nil { - if errors.Is(err, common.ErrorFunctionNotFound) { - s.handleRestError(response.WriteError(http.StatusNotFound, err)) - return - } - s.handleRestError(response.WriteError(http.StatusBadRequest, err)) - return - } - response.WriteHeader(http.StatusOK) - } - - ws.Route(ws.DELETE("/{name}"). - To(func(request *restful.Request, response *restful.Response) { - name := request.PathParameter("name") - deleteFunctionHandler(response, "", name) - }). - Doc("delete a function"). - Metadata(restfulspec.KeyOpenAPITags, tags). - Operation("deleteFunction"). - Param(ws.PathParameter("name", "name of the function").DataType("string"))) - - ws.Route(ws.DELETE("/{namespace}/{name}"). - To(func(request *restful.Request, response *restful.Response) { - namespace := request.PathParameter("namespace") - name := request.PathParameter("name") - deleteFunctionHandler(response, namespace, name) - }). - Doc("delete a namespaced function"). - Metadata(restfulspec.KeyOpenAPITags, tags). - Operation("deleteNamespacedFunction"). - Param(ws.PathParameter("name", "name of the function").DataType("string")). - Param(ws.PathParameter("namespace", "namespace of the function").DataType("string"))) - - return ws -} diff --git a/server/function_store.go b/server/function_store.go deleted file mode 100644 index 1d8d609d..00000000 --- a/server/function_store.go +++ /dev/null @@ -1,176 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package server - -import ( - "io" - "log/slog" - "net/http" - "os" - "path/filepath" - "strings" - "sync" - - "github.com/functionstream/function-stream/common" - - restfulspec "github.com/emicklei/go-restful-openapi/v2" - "github.com/emicklei/go-restful/v3" - "github.com/functionstream/function-stream/common/model" - "github.com/functionstream/function-stream/fs" - "github.com/pkg/errors" - "gopkg.in/yaml.v3" -) - -var log = common.NewDefaultLogger() - -type FunctionStore interface { - Load() error -} - -type FunctionStoreImpl struct { - mu sync.Mutex - fm fs.FunctionManager - path string - loadedFunctions map[string]*model.Function - loadingFunctions map[string]*model.Function -} - -func (f *FunctionStoreImpl) Load() error { - f.mu.Lock() - defer f.mu.Unlock() - f.loadingFunctions = make(map[string]*model.Function) - info, err := os.Stat(f.path) - if err != nil { - if os.IsNotExist(err) { - log.Info("the path to the function store does not exist. 
skip loading functions", "path", f.path) - return nil - } - return errors.Wrapf(err, "the path to the function store %s is invalid", f.path) - } - if !info.IsDir() { - err = f.loadFile(f.path) - if err != nil { - return err - } - } else { - err = filepath.Walk(f.path, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - - if strings.HasSuffix(info.Name(), ".yaml") || strings.HasSuffix(info.Name(), ".yml") { - err := f.loadFile(path) - if err != nil { - return err - } - } - return nil - }) - if err != nil { - return err - } - } - - for key, value := range f.loadingFunctions { - if _, exists := f.loadedFunctions[key]; !exists { - err := f.fm.StartFunction(value) - if err != nil { - return err - } - } - } - - for key, value := range f.loadedFunctions { - if _, exists := f.loadingFunctions[key]; !exists { - err := f.fm.DeleteFunction(value.Namespace, value.Name) - if err != nil { - return err - } - } - } - - f.loadedFunctions = f.loadingFunctions - slog.Info("functions loaded", "loadedFunctionsCount", len(f.loadedFunctions)) - return nil -} - -func (f *FunctionStoreImpl) loadFile(path string) error { - data, err := os.ReadFile(path) - if err != nil { - return err - } - dec := yaml.NewDecoder(strings.NewReader(string(data))) - for { - var function model.Function - err := dec.Decode(&function) - if err != nil { - if err == io.EOF { - break - } - return err - } - if err := function.Validate(); err != nil { - return errors.Wrapf(err, "function %s is invalid", function.Name) - } - if _, ok := f.loadingFunctions[function.Name]; ok { - return errors.Errorf("duplicated function %s", function.Name) - } - f.loadingFunctions[function.Name] = &function - } - return nil -} - -func NewFunctionStoreImpl(fm fs.FunctionManager, path string) (FunctionStore, error) { - return &FunctionStoreImpl{ - fm: fm, - path: path, - }, nil -} - -type FunctionStoreDisabled struct { -} - -func (f *FunctionStoreDisabled) Load() error { - return nil -} - -func 
NewFunctionStoreDisabled() FunctionStore { - return &FunctionStoreDisabled{} -} - -func (s *Server) makeFunctionStoreService() *restful.WebService { - ws := new(restful.WebService) - ws.Path("/api/v1/function-store") - - tags := []string{"function-store"} - - ws.Route(ws.GET("/reload"). - To(func(request *restful.Request, response *restful.Response) { - err := s.FunctionStore.Load() - if err != nil { - s.handleRestError(response.WriteErrorString(400, err.Error())) - return - } - response.WriteHeader(200) - }). - Doc("reload functions from the function store"). - Metadata(restfulspec.KeyOpenAPITags, tags). - Operation("reloadFunctions"). - Returns(http.StatusOK, "OK", nil)) - - return ws -} diff --git a/server/function_store_test.go b/server/function_store_test.go deleted file mode 100644 index 2651d114..00000000 --- a/server/function_store_test.go +++ /dev/null @@ -1,150 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package server - -import ( - "os" - "testing" - - "github.com/functionstream/function-stream/common" - "github.com/functionstream/function-stream/common/model" - "github.com/functionstream/function-stream/fs" - "github.com/functionstream/function-stream/fs/api" - "github.com/functionstream/function-stream/fs/contube" - "github.com/stretchr/testify/assert" - "gopkg.in/yaml.v3" -) - -type testFunctionManagerImpl struct { - functions map[common.NamespacedName]*model.Function -} - -func (t *testFunctionManagerImpl) StartFunction(f *model.Function) error { - t.functions[common.GetNamespacedName(f.Namespace, f.Name)] = f - return nil -} - -func (t *testFunctionManagerImpl) DeleteFunction(namespace, name string) error { - delete(t.functions, common.GetNamespacedName(namespace, name)) - return nil -} - -func (t *testFunctionManagerImpl) ListFunctions() []string { - return nil -} - -func (t *testFunctionManagerImpl) ProduceEvent(_ string, _ contube.Record) error { - return nil -} - -func (t *testFunctionManagerImpl) ConsumeEvent(_ string) (contube.Record, error) { - return nil, nil -} - -func (t *testFunctionManagerImpl) GetStateStore() (api.StateStore, error) { - return nil, nil -} - -func (t *testFunctionManagerImpl) Close() error { - return nil -} - -func newTestFunctionManagerImpl() fs.FunctionManager { - return &testFunctionManagerImpl{ - functions: make(map[common.NamespacedName]*model.Function), - } -} - -func createTestFunction(name string) *model.Function { - return &model.Function{ - Runtime: model.RuntimeConfig{ - Type: common.WASMRuntime, - Config: map[string]interface{}{ - common.RuntimeArchiveConfigKey: "../bin/example_basic.wasm", - }, - }, - Sources: []model.TubeConfig{ - { - Type: common.MemoryTubeType, - Config: (&contube.SourceQueueConfig{ - Topics: []string{"input"}, - SubName: "test", - }).ToConfigMap(), - }, - }, - Sink: model.TubeConfig{ - Type: common.MemoryTubeType, - Config: (&contube.SinkQueueConfig{ - Topic: "output", - }).ToConfigMap(), 
- }, - State: map[string]interface{}{}, - Name: name, - Replicas: 1, - Config: map[string]string{}, - } -} - -const yamlSeparator string = "---\n" - -func TestFunctionStoreLoading(t *testing.T) { - tmpfile, err := os.CreateTemp("", "*.yaml") - assert.Nil(t, err) - //defer os.Remove(tmpfile.Name()) - - fm := newTestFunctionManagerImpl() - functionStore, err := NewFunctionStoreImpl(fm, tmpfile.Name()) - assert.Nil(t, err) - - f1 := createTestFunction("f1") - - f1Data, err := yaml.Marshal(&f1) - assert.Nil(t, err) - - _, err = tmpfile.Write(f1Data) - assert.Nil(t, err) - - assert.Nil(t, functionStore.Load()) - - assert.Len(t, fm.(*testFunctionManagerImpl).functions, 1) - assert.Equal(t, f1, fm.(*testFunctionManagerImpl).functions[common.GetNamespacedName("", "f1")]) - - f2 := createTestFunction("f2") - _, err = tmpfile.WriteString(yamlSeparator) - assert.Nil(t, err) - f2Data, err := yaml.Marshal(f2) - assert.Nil(t, err) - _, err = tmpfile.Write(f2Data) - assert.Nil(t, err) - - assert.Nil(t, functionStore.Load()) - assert.Len(t, fm.(*testFunctionManagerImpl).functions, 2) - assert.Equal(t, f1, fm.(*testFunctionManagerImpl).functions[common.GetNamespacedName("", "f1")]) - assert.Equal(t, f2, fm.(*testFunctionManagerImpl).functions[common.GetNamespacedName("", "f2")]) - - assert.Nil(t, tmpfile.Close()) - - tmpfile, err = os.Create(tmpfile.Name()) // Overwrite the file - assert.Nil(t, err) - - _, err = tmpfile.Write(f2Data) - assert.Nil(t, err) - - assert.Nil(t, functionStore.Load()) - assert.Len(t, fm.(*testFunctionManagerImpl).functions, 1) - assert.Equal(t, f2, fm.(*testFunctionManagerImpl).functions[common.GetNamespacedName("", "f2")]) -} diff --git a/server/http_tube_service.go b/server/http_tube_service.go deleted file mode 100644 index 71fa75da..00000000 --- a/server/http_tube_service.go +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package server - -import ( - "net/http" - - restfulspec "github.com/emicklei/go-restful-openapi/v2" - "github.com/emicklei/go-restful/v3" -) - -func (s *Server) makeHttpTubeService() *restful.WebService { - ws := new(restful.WebService) - ws.Path("/api/v1/http-tube"). - Consumes(restful.MIME_JSON). - Produces(restful.MIME_JSON) - - tags := []string{"http-tube"} - - ws.Route(ws.POST("/{endpoint}"). - To(func(request *restful.Request, response *restful.Response) { - s.options.httpTubeFact.GetHandleFunc(func(r *http.Request) (string, error) { - return request.PathParameter("endpoint"), nil - }, s.log)(response.ResponseWriter, request.Request) - }). - Doc("trigger the http tube endpoint"). - Metadata(restfulspec.KeyOpenAPITags, tags). - Param(ws.PathParameter("endpoint", "Endpoint").DataType("string")). - Reads(bytesSchema). - Operation("triggerHttpTubeEndpoint")) - return ws -} diff --git a/server/server.go b/server/server.go deleted file mode 100644 index c316931a..00000000 --- a/server/server.go +++ /dev/null @@ -1,548 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package server - -import ( - "context" - "fmt" - "net" - "net/http" - "net/url" - "strings" - "sync/atomic" - "time" - - "github.com/functionstream/function-stream/common/config" - - "github.com/functionstream/function-stream/fs/runtime/external" - - "github.com/go-logr/logr" - - restfulspec "github.com/emicklei/go-restful-openapi/v2" - "github.com/emicklei/go-restful/v3" - "github.com/functionstream/function-stream/common" - "github.com/functionstream/function-stream/fs" - "github.com/functionstream/function-stream/fs/api" - "github.com/functionstream/function-stream/fs/contube" - "github.com/functionstream/function-stream/fs/runtime/wazero" - "github.com/functionstream/function-stream/fs/statestore" - "github.com/go-openapi/spec" - "github.com/pkg/errors" -) - -var ( - ErrUnsupportedStateStore = errors.New("unsupported state store") - ErrUnsupportedQueueType = errors.New("unsupported queue type") -) - -type Server struct { - options *serverOptions - httpSvr atomic.Pointer[http.Server] - log *common.Logger - Manager fs.FunctionManager - FunctionStore FunctionStore -} - -type TubeLoaderType func(c *FactoryConfig) (contube.TubeFactory, error) -type RuntimeLoaderType func(c *FactoryConfig) (api.FunctionRuntimeFactory, error) -type StateStoreProviderType func(c *StateStoreConfig) (api.StateStoreFactory, error) - -type serverOptions struct { - httpListener net.Listener - managerOpts []fs.ManagerOption - httpTubeFact *contube.HttpTubeFactory - stateStoreProvider StateStoreProviderType - functionStore string - enableTls bool - tlsCertFile string 
- tlsKeyFile string - tubeFactoryBuilders map[string]func(configMap config.ConfigMap) (contube.TubeFactory, error) - tubeConfig map[string]config.ConfigMap - runtimeFactoryBuilders map[string]func(configMap config.ConfigMap) (api.FunctionRuntimeFactory, error) - runtimeConfig map[string]config.ConfigMap - queueConfig QueueConfig - log *logr.Logger -} - -type ServerOption interface { - apply(option *serverOptions) (*serverOptions, error) -} - -type serverOptionFunc func(*serverOptions) (*serverOptions, error) - -func (f serverOptionFunc) apply(c *serverOptions) (*serverOptions, error) { - return f(c) -} - -// WithHttpListener sets the listener for the HTTP server. -// If not set, the server will listen on the Config.ListenAddr. -func WithHttpListener(listener net.Listener) ServerOption { - return serverOptionFunc(func(o *serverOptions) (*serverOptions, error) { - o.httpListener = listener - return o, nil - }) -} - -// WithHttpTubeFactory sets the factory for the HTTP tube. -// If not set, the server will use the default HTTP tube factory. 
-func WithHttpTubeFactory(factory *contube.HttpTubeFactory) ServerOption { - return serverOptionFunc(func(o *serverOptions) (*serverOptions, error) { - o.httpTubeFact = factory - return o, nil - }) -} - -func WithQueueConfig(config QueueConfig) ServerOption { - return serverOptionFunc(func(o *serverOptions) (*serverOptions, error) { - o.queueConfig = config - return o, nil - }) -} - -func WithTubeFactoryBuilder( - name string, - builder func(configMap config.ConfigMap) (contube.TubeFactory, error), -) ServerOption { - return serverOptionFunc(func(o *serverOptions) (*serverOptions, error) { - o.tubeFactoryBuilders[name] = builder - return o, nil - }) -} - -func WithTubeFactoryBuilders( - builder map[string]func(configMap config.ConfigMap, - ) (contube.TubeFactory, error)) ServerOption { - return serverOptionFunc(func(o *serverOptions) (*serverOptions, error) { - for n, b := range builder { - o.tubeFactoryBuilders[n] = b - } - return o, nil - }) -} - -func WithRuntimeFactoryBuilder( - name string, - builder func(configMap config.ConfigMap) (api.FunctionRuntimeFactory, error), -) ServerOption { - return serverOptionFunc(func(o *serverOptions) (*serverOptions, error) { - o.runtimeFactoryBuilders[name] = builder - return o, nil - }) -} - -func WithRuntimeFactoryBuilders( - builder map[string]func(configMap config.ConfigMap) (api.FunctionRuntimeFactory, error), -) ServerOption { - return serverOptionFunc(func(o *serverOptions) (*serverOptions, error) { - for n, b := range builder { - o.runtimeFactoryBuilders[n] = b - } - return o, nil - }) -} - -func WithStateStoreLoader(loader func(c *StateStoreConfig) (api.StateStoreFactory, error)) ServerOption { - return serverOptionFunc(func(o *serverOptions) (*serverOptions, error) { - o.stateStoreProvider = loader - return o, nil - }) -} - -func WithPackageLoader(packageLoader api.PackageLoader) ServerOption { - return serverOptionFunc(func(o *serverOptions) (*serverOptions, error) { - o.managerOpts = append(o.managerOpts, 
fs.WithPackageLoader(packageLoader)) - return o, nil - }) -} - -func WithLogger(log *logr.Logger) ServerOption { - return serverOptionFunc(func(c *serverOptions) (*serverOptions, error) { - c.log = log - return c, nil - }) -} - -func GetBuiltinTubeFactoryBuilder() map[string]func(configMap config.ConfigMap) (contube.TubeFactory, error) { - return map[string]func(configMap config.ConfigMap) (contube.TubeFactory, error){ - common.PulsarTubeType: func(configMap config.ConfigMap) (contube.TubeFactory, error) { - return contube.NewPulsarEventQueueFactory(context.Background(), contube.ConfigMap(configMap)) - }, - //nolint:unparam - common.MemoryTubeType: func(_ config.ConfigMap) (contube.TubeFactory, error) { - return contube.NewMemoryQueueFactory(context.Background()), nil - }, - //nolint:unparam - common.EmptyTubeType: func(_ config.ConfigMap) (contube.TubeFactory, error) { - return contube.NewEmptyTubeFactory(), nil - }, - } -} - -func GetBuiltinRuntimeFactoryBuilder() map[string]func(configMap config.ConfigMap) (api.FunctionRuntimeFactory, error) { - return map[string]func(configMap config.ConfigMap) (api.FunctionRuntimeFactory, error){ - //nolint:unparam - common.WASMRuntime: func(configMap config.ConfigMap) (api.FunctionRuntimeFactory, error) { - return wazero.NewWazeroFunctionRuntimeFactory(), nil - }, - common.ExternalRuntime: func(configMap config.ConfigMap) (api.FunctionRuntimeFactory, error) { - return external.NewFactoryWithConfig(configMap) - }, - } -} - -func setupFactories[T any](factoryBuilder map[string]func(configMap config.ConfigMap) (T, error), - config map[string]config.ConfigMap, -) (map[string]T, error) { - factories := make(map[string]T) - for name, builder := range factoryBuilder { - f, err := builder(config[name]) - if err != nil { - return nil, fmt.Errorf("error creating factory [%s] %w", name, err) - } - factories[name] = f - } - return factories, nil -} - -func DefaultStateStoreProvider(c *StateStoreConfig) (api.StateStoreFactory, error) { - 
switch strings.ToLower(*c.Type) { - case common.StateStorePebble: - return statestore.NewPebbleStateStoreFactory(c.Config) - } - return statestore.NewDefaultPebbleStateStoreFactory() -} - -func WithConfig(config *Config) ServerOption { - return serverOptionFunc(func(o *serverOptions) (*serverOptions, error) { - ln, err := net.Listen("tcp", config.ListenAddr) - if err != nil { - return nil, err - } - o.httpListener = ln - o.enableTls = config.EnableTLS - if o.enableTls { - if config.TLSCertFile == "" || config.TLSKeyFile == "" { - return nil, fmt.Errorf("TLS certificate and key file must be provided") - } - o.tlsCertFile = config.TLSCertFile - o.tlsKeyFile = config.TLSKeyFile - } - o.tubeConfig = config.TubeConfig - o.queueConfig = config.Queue - o.runtimeConfig = config.RuntimeConfig - if config.StateStore != nil { - stateStoreFactory, err := o.stateStoreProvider(config.StateStore) - if err != nil { - return nil, err - } - o.managerOpts = append(o.managerOpts, fs.WithStateStoreFactory(stateStoreFactory)) - } - o.functionStore = config.FunctionStore - return o, nil - }) -} - -func NewServer(opts ...ServerOption) (*Server, error) { - options := &serverOptions{} - options.tubeFactoryBuilders = make(map[string]func(configMap config.ConfigMap) (contube.TubeFactory, error)) - options.tubeConfig = make(map[string]config.ConfigMap) - options.runtimeFactoryBuilders = make(map[string]func(configMap config.ConfigMap) (api.FunctionRuntimeFactory, error)) - options.runtimeConfig = make(map[string]config.ConfigMap) - options.stateStoreProvider = DefaultStateStoreProvider - options.managerOpts = []fs.ManagerOption{} - for _, o := range opts { - if o == nil { - continue - } - _, err := o.apply(options) - if err != nil { - return nil, err - } - } - var log *common.Logger - if options.log == nil { - log = common.NewDefaultLogger() - } else { - log = common.NewLogger(options.log) - } - if options.httpTubeFact == nil { - options.httpTubeFact = 
contube.NewHttpTubeFactory(context.Background()) - log.Info("Using the default HTTP tube factory") - } - options.managerOpts = append(options.managerOpts, - fs.WithTubeFactory("http", options.httpTubeFact), - fs.WithLogger(log.Logger)) - - // Config Tube Factory - if tubeFactories, err := setupFactories(options.tubeFactoryBuilders, options.tubeConfig); err == nil { - for name, f := range tubeFactories { - options.managerOpts = append(options.managerOpts, fs.WithTubeFactory(name, f)) - } - } else { - return nil, err - } - - // Config Runtime Factory - if runtimeFactories, err := setupFactories(options.runtimeFactoryBuilders, options.runtimeConfig); err == nil { - for name, f := range runtimeFactories { - options.managerOpts = append(options.managerOpts, fs.WithRuntimeFactory(name, f)) - } - } else { - return nil, err - } - - // Config Queue Factory - if options.queueConfig.Type != "" { - queueFactoryBuilder, ok := options.tubeFactoryBuilders[options.queueConfig.Type] - if !ok { - return nil, fmt.Errorf("%w, queueType: %s", ErrUnsupportedQueueType, options.queueConfig.Type) - } - queueFactory, err := queueFactoryBuilder(options.queueConfig.Config) - if err != nil { - return nil, fmt.Errorf("error creating queue factory %w", err) - } - options.managerOpts = append(options.managerOpts, fs.WithQueueFactory(queueFactory)) - } - - manager, err := fs.NewFunctionManager(options.managerOpts...) 
- if err != nil { - return nil, err - } - if options.httpListener == nil { - options.httpListener, err = net.Listen("tcp", "localhost:7300") - if err != nil { - return nil, err - } - } - var functionStore FunctionStore - if options.functionStore != "" { - functionStore, err = NewFunctionStoreImpl(manager, options.functionStore) - if err != nil { - return nil, err - } - } else { - functionStore = NewFunctionStoreDisabled() - } - err = functionStore.Load() - if err != nil { - return nil, err - } - return &Server{ - options: options, - Manager: manager, - log: log, - FunctionStore: functionStore, - }, nil -} - -func NewDefaultServer() (*Server, error) { - defaultConfig := &Config{ - ListenAddr: ":7300", - Queue: QueueConfig{ - Type: common.MemoryTubeType, - Config: config.ConfigMap{}, - }, - TubeConfig: map[string]config.ConfigMap{ - common.PulsarTubeType: { - contube.PulsarURLKey: "pulsar://localhost:6650", - }, - }, - RuntimeConfig: map[string]config.ConfigMap{}, - } - return NewServer( - WithTubeFactoryBuilders(GetBuiltinTubeFactoryBuilder()), - WithRuntimeFactoryBuilders(GetBuiltinRuntimeFactoryBuilder()), - WithConfig(defaultConfig)) -} - -func (s *Server) Run(context context.Context) { - s.log.Info("Hello from the function stream server!") - go func() { - <-context.Done() - err := s.Close() - if err != nil { - s.log.Error(err, "failed to shutdown server") - return - } - }() - err := s.startRESTHandlers() - if err != nil && !errors.Is(err, http.ErrServerClosed) { - s.log.Error(err, "Error starting REST handlers") - } -} - -func (s *Server) startRESTHandlers() error { - - statusSvr := new(restful.WebService) - statusSvr.Path("/api/v1/status") - statusSvr.Route(statusSvr.GET("/").To(func(request *restful.Request, response *restful.Response) { - response.WriteHeader(http.StatusOK) - }). - Doc("Get the status of the Function Stream"). - Metadata(restfulspec.KeyOpenAPITags, []string{"status"}). 
- Operation("getStatus")) - - container := restful.NewContainer() - container.Add(s.makeFunctionService()) - container.Add(s.makeTubeService()) - container.Add(s.makeStateService()) - container.Add(s.makeHttpTubeService()) - container.Add(s.makeFunctionStoreService()) - container.Add(statusSvr) - - cors := restful.CrossOriginResourceSharing{ - AllowedHeaders: []string{"Content-Type", "Accept"}, - AllowedMethods: []string{"GET", "POST", "OPTIONS", "PUT", "DELETE"}, - CookiesAllowed: false, - Container: container} - container.Filter(cors.Filter) - container.Filter(container.OPTIONSFilter) - - config := restfulspec.Config{ - WebServices: container.RegisteredWebServices(), - APIPath: "/apidocs", - PostBuildSwaggerObjectHandler: enrichSwaggerObject} - container.Add(restfulspec.NewOpenAPIService(config)) - - httpSvr := &http.Server{ - Handler: container.ServeMux, - } - s.httpSvr.Store(httpSvr) - - if s.options.enableTls { - return httpSvr.ServeTLS(s.options.httpListener, s.options.tlsCertFile, s.options.tlsKeyFile) - } else { - return httpSvr.Serve(s.options.httpListener) - } -} - -func enrichSwaggerObject(swo *spec.Swagger) { - swo.Info = &spec.Info{ - InfoProps: spec.InfoProps{ - Title: "Function Stream Service", - Description: "Manage Function Stream Resources", - Contact: &spec.ContactInfo{ - ContactInfoProps: spec.ContactInfoProps{ - Name: "Function Stream Org", - URL: "https://github.com/FunctionStream", - }, - }, - License: &spec.License{ - LicenseProps: spec.LicenseProps{ - Name: "Apache 2", - URL: "http://www.apache.org/licenses/", - }, - }, - Version: "1.0.0", - }, - } - swo.Host = "localhost:7300" - swo.Schemes = []string{"http"} - swo.Tags = []spec.Tag{ - { - TagProps: spec.TagProps{ - Name: "function", - Description: "Managing functions"}, - }, - { - TagProps: spec.TagProps{ - Name: "tube", - Description: "Managing tubes"}, - }, - { - TagProps: spec.TagProps{ - Name: "state", - Description: "Managing state"}, - }, - { - TagProps: spec.TagProps{ - Name: 
"http-tube", - Description: "Managing HTTP tubes"}, - }, - { - TagProps: spec.TagProps{ - Name: "function-store", - Description: "Managing function store"}, - }, - } -} - -func (s *Server) WaitForReady(ctx context.Context) <-chan struct{} { - c := make(chan struct{}) - detect := func() bool { - u := (&url.URL{ - Scheme: "http", - Host: s.options.httpListener.Addr().String(), - Path: "/api/v1/status", - }).String() - req, err := http.NewRequestWithContext(ctx, http.MethodGet, u, nil) - if err != nil { - s.log.Error(err, "Failed to create detect request") - return false - } - client := &http.Client{} - _, err = client.Do(req) - if err != nil { - s.log.Info("Detect connection to server failed", "error", err) - } - s.log.Info("Server is ready", "address", s.options.httpListener.Addr().String()) - return true - } - go func() { - defer close(c) - - if detect() { - return - } - // Try to connect to the server - for { - select { - case <-ctx.Done(): - return - case <-time.After(1 * time.Second): - if detect() { - return - } - } - } - }() - return c -} - -func (s *Server) Close() error { - s.log.Info("Shutting down function stream server") - if httpSvr := s.httpSvr.Load(); httpSvr != nil { - if err := httpSvr.Close(); err != nil { - return err - } - } - if s.Manager != nil { - err := s.Manager.Close() - if err != nil { - return err - } - } - s.log.Info("Function stream server is shut down") - return nil -} - -func (s *Server) handleRestError(e error) { - if e == nil { - return - } - s.log.Error(e, "Error handling REST request") -} diff --git a/server/server_test.go b/server/server_test.go deleted file mode 100644 index 1cfb909a..00000000 --- a/server/server_test.go +++ /dev/null @@ -1,357 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package server - -import ( - "context" - "encoding/json" - "math/rand" - "net" - "strconv" - "testing" - "time" - - "github.com/nats-io/nats.go" - - "github.com/functionstream/function-stream/common/config" - - adminclient "github.com/functionstream/function-stream/admin/client" - "github.com/functionstream/function-stream/common" - "github.com/functionstream/function-stream/common/model" - "github.com/functionstream/function-stream/fs/api" - "github.com/functionstream/function-stream/fs/contube" - "github.com/functionstream/function-stream/tests" - "github.com/stretchr/testify/assert" -) - -func getListener(t *testing.T) net.Listener { - ln, err := net.Listen("tcp", "localhost:0") - if err != nil { - t.Fatalf("Failed to listen: %v", err) - } - t.Logf("Listening on %s\n", ln.Addr().String()) - return ln -} - -func startStandaloneSvr(t *testing.T, ctx context.Context, opts ...ServerOption) (*Server, string) { - ln := getListener(t) - defaultOpts := []ServerOption{ - WithConfig(&Config{ - TubeConfig: map[string]config.ConfigMap{ - common.NatsTubeType: { - "nats_url": "nats://localhost:4222", - }, - }, - }), - WithHttpListener(ln), - WithTubeFactoryBuilders(GetBuiltinTubeFactoryBuilder()), - WithRuntimeFactoryBuilders(GetBuiltinRuntimeFactoryBuilder()), - } - s, err := NewServer( - append(defaultOpts, opts...)..., - ) - if err != nil { - t.Fatal(err) - } - svrCtx, svrCancel := context.WithCancel(context.Background()) - go s.Run(svrCtx) - go func() { - <-ctx.Done() - svrCancel() - }() - return s, ln.Addr().String() -} - -func 
TestStandaloneBasicFunction(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - s, _ := startStandaloneSvr(t, ctx) - - inputTopic := "test-input-" + strconv.Itoa(rand.Int()) - outputTopic := "test-output-" + strconv.Itoa(rand.Int()) - - funcConf := &model.Function{ - Package: "../bin/example_basic.wasm", - Sources: []model.TubeConfig{ - { - Type: common.MemoryTubeType, - Config: (&contube.SourceQueueConfig{ - Topics: []string{inputTopic}, - SubName: "test", - }).ToConfigMap(), - }, - }, - Sink: model.TubeConfig{ - Type: common.MemoryTubeType, - Config: (&contube.SinkQueueConfig{ - Topic: outputTopic, - }).ToConfigMap(), - }, - Name: "test-func", - Replicas: 1, - } - err := s.Manager.StartFunction(funcConf) - if err != nil { - t.Fatal(err) - } - - p := &tests.Person{ - Name: "rbt", - Money: 0, - } - jsonBytes, err := json.Marshal(p) - if err != nil { - t.Fatal(err) - } - err = s.Manager.ProduceEvent(inputTopic, contube.NewRecordImpl(jsonBytes, func() { - })) - if err != nil { - t.Fatal(err) - } - - event, err := s.Manager.ConsumeEvent(outputTopic) - if err != nil { - t.Error(err) - return - } - var out tests.Person - err = json.Unmarshal(event.GetPayload(), &out) - if err != nil { - t.Error(err) - return - } - if out.Money != 1 { - t.Errorf("expected 1, got %d", out.Money) - return - } -} - -func TestHttpTube(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - s, httpAddr := startStandaloneSvr(t, ctx, nil, nil) - - endpoint := "test-endpoint" - funcConf := &model.Function{ - Package: "../bin/example_basic.wasm", - Sources: []model.TubeConfig{{ - Type: common.HttpTubeType, - Config: map[string]interface{}{ - contube.EndpointKey: endpoint, - }, - }}, - Sink: model.TubeConfig{ - Type: common.MemoryTubeType, - Config: (&contube.SinkQueueConfig{ - Topic: "output", - }).ToConfigMap(), - }, - Name: "test-func", - Replicas: 1, - } - - err := s.Manager.StartFunction(funcConf) - assert.Nil(t, 
err) - - p := &tests.Person{ - Name: "rbt", - Money: 0, - } - jsonBytes, err := json.Marshal(p) - if err != nil { - t.Fatal(err) - } - - cfg := adminclient.NewConfiguration() - cfg.Host = httpAddr - cli := adminclient.NewAPIClient(cfg) - _, err = cli.HttpTubeAPI.TriggerHttpTubeEndpoint(ctx, endpoint).Body(string(jsonBytes)).Execute() - assert.Nil(t, err) - - event, err := s.Manager.ConsumeEvent("output") - if err != nil { - t.Error(err) - return - } - var out tests.Person - err = json.Unmarshal(event.GetPayload(), &out) - if err != nil { - t.Error(err) - return - } - if out.Money != 1 { - t.Errorf("expected 1, got %d", out.Money) - return - } -} - -func TestNatsTube(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - s, _ := startStandaloneSvr(t, ctx, WithTubeFactoryBuilder(common.NatsTubeType, - func(configMap config.ConfigMap) (contube.TubeFactory, error) { - return contube.NewNatsEventQueueFactory(context.Background(), contube.ConfigMap(configMap)) - }), nil) - - funcConf := &model.Function{ - Package: "../bin/example_basic.wasm", - Sources: []model.TubeConfig{{ - Type: common.NatsTubeType, - Config: map[string]interface{}{ - "subject": "input", - }, - }}, - Sink: model.TubeConfig{ - Type: common.NatsTubeType, - Config: map[string]interface{}{ - "subject": "output", - }, - }, - Name: "test-func", - Replicas: 1, - } - - err := s.Manager.StartFunction(funcConf) - assert.Nil(t, err) - - p := &tests.Person{ - Name: "rbt", - Money: 0, - } - jsonBytes, err := json.Marshal(p) - if err != nil { - t.Fatal(err) - } - - nc, err := nats.Connect("nats://localhost:4222") - assert.NoError(t, err) - - sub, err := nc.SubscribeSync("output") - assert.NoError(t, err) - - assert.NoError(t, nc.Publish("input", jsonBytes)) - - event, err := sub.NextMsg(3 * time.Second) - if err != nil { - t.Error(err) - return - } - var out tests.Person - err = json.Unmarshal(event.Data, &out) - if err != nil { - t.Error(err) - return - } - if out.Money != 1 { 
- t.Errorf("expected 1, got %d", out.Money) - return - } -} - -type MockRuntimeFactory struct { -} - -func (f *MockRuntimeFactory) NewFunctionRuntime(instance api.FunctionInstance, - _ *model.RuntimeConfig) (api.FunctionRuntime, error) { - return &MockRuntime{ - funcCtx: instance.FunctionContext(), - }, nil -} - -type MockRuntime struct { - funcCtx api.FunctionContext -} - -func (r *MockRuntime) WaitForReady() <-chan error { - c := make(chan error) - close(c) - return c -} - -func (r *MockRuntime) Call(e contube.Record) (contube.Record, error) { - v, err := r.funcCtx.GetState(context.Background(), "key") - if err != nil { - return nil, err - } - str := string(v) - err = r.funcCtx.PutState(context.Background(), "key", []byte(str+"!")) - if err != nil { - return nil, err - } - return contube.NewRecordImpl(nil, func() { - - }), nil -} - -func (r *MockRuntime) Stop() { -} - -func TestStatefulFunction(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - s, httpAddr := startStandaloneSvr(t, ctx, - WithRuntimeFactoryBuilder("mock", func(configMap config.ConfigMap) (api.FunctionRuntimeFactory, error) { - return &MockRuntimeFactory{}, nil - })) - - input := "input" - output := "output" - funcConf := &model.Function{ - Name: "test-func", - Runtime: model.RuntimeConfig{ - Type: "mock", - }, - Sources: []model.TubeConfig{ - { - Type: common.MemoryTubeType, - Config: (&contube.SourceQueueConfig{ - Topics: []string{input}, - SubName: "test", - }).ToConfigMap(), - }, - }, - Sink: model.TubeConfig{ - Type: common.MemoryTubeType, - Config: (&contube.SinkQueueConfig{ - Topic: "output", - }).ToConfigMap(), - }, - Replicas: 1, - } - err := s.Manager.StartFunction(funcConf) - if err != nil { - t.Fatal(err) - } - - cfg := adminclient.NewConfiguration() - cfg.Host = httpAddr - cli := adminclient.NewAPIClient(cfg) - - _, err = cli.StateAPI.SetState(ctx, "key").Body("hello").Execute() - assert.Nil(t, err) - - err = s.Manager.ProduceEvent(input, 
contube.NewRecordImpl(nil, func() { - })) - assert.Nil(t, err) - - _, err = s.Manager.ConsumeEvent(output) - assert.Nil(t, err) - - result, _, err := cli.StateAPI.GetState(ctx, "key").Execute() - assert.Nil(t, err) - assert.Equal(t, "hello!", result) -} diff --git a/server/state_service.go b/server/state_service.go deleted file mode 100644 index c942638f..00000000 --- a/server/state_service.go +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package server - -import ( - "io" - "net/http" - - restfulspec "github.com/emicklei/go-restful-openapi/v2" - "github.com/emicklei/go-restful/v3" - "github.com/pkg/errors" -) - -func (s *Server) makeStateService() *restful.WebService { - ws := new(restful.WebService) - ws.Path("/api/v1/state") - - tags := []string{"state"} - - keyParam := ws.PathParameter("key", "state key").DataType("string") - - ws.Route(ws.POST("/{key}"). 
- To(func(request *restful.Request, response *restful.Response) { - key := request.PathParameter("key") - - state, err := s.Manager.GetStateStore() - if err != nil { - s.handleRestError(response.WriteError(http.StatusInternalServerError, err)) - return - } - - body := request.Request.Body - defer func() { - s.handleRestError(body.Close()) - }() - - content, err := io.ReadAll(body) - if err != nil { - s.handleRestError(response.WriteError(http.StatusBadRequest, errors.Wrap(err, "Failed to read body"))) - return - } - - err = state.PutState(request.Request.Context(), key, content) - if err != nil { - s.handleRestError(response.WriteError(http.StatusInternalServerError, err)) - return - } - }). - Doc("set a state"). - Metadata(restfulspec.KeyOpenAPITags, tags). - Operation("setState"). - Param(keyParam). - Reads(bytesSchema)) - - ws.Route(ws.GET("/{key}"). - To(func(request *restful.Request, response *restful.Response) { - key := request.PathParameter("key") - state, err := s.Manager.GetStateStore() - if err != nil { - s.handleRestError(response.WriteError(http.StatusInternalServerError, err)) - return - } - - content, err := state.GetState(request.Request.Context(), key) - if err != nil { - s.handleRestError(response.WriteError(http.StatusInternalServerError, err)) - return - } - - _, err = response.Write(content) - s.handleRestError(err) - }). - Doc("get a state"). - Metadata(restfulspec.KeyOpenAPITags, tags). - Operation("getState"). - Writes(bytesSchema). - Returns(http.StatusOK, "OK", bytesSchema). - Param(keyParam)) - - return ws -} diff --git a/server/tube_service.go b/server/tube_service.go deleted file mode 100644 index adc8ea7f..00000000 --- a/server/tube_service.go +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package server - -import ( - "io" - "net/http" - - restfulspec "github.com/emicklei/go-restful-openapi/v2" - "github.com/emicklei/go-restful/v3" - "github.com/functionstream/function-stream/fs/contube" -) - -// Due to this issue: https://github.com/emicklei/go-restful-openapi/issues/115, -// we need to use this schema to specify the format of the byte array. -var bytesSchema = restfulspec.SchemaType{RawType: "string", Format: "byte"} - -func (s *Server) makeTubeService() *restful.WebService { - - ws := new(restful.WebService) - ws.Path("/api/v1"). - Consumes(restful.MIME_JSON). - Produces(restful.MIME_JSON) - - tags := []string{"tube"} - - tubeName := ws.PathParameter("name", "tube name").DataType("string") - - ws.Route(ws.POST("/produce/{name}"). - To(func(request *restful.Request, response *restful.Response) { - name := request.PathParameter("name") - body := request.Request.Body - defer func() { - s.handleRestError(body.Close()) - }() - - content, err := io.ReadAll(body) - if err != nil { - s.handleRestError(response.WriteErrorString(http.StatusInternalServerError, err.Error())) - return - } - err = s.Manager.ProduceEvent(name, contube.NewRecordImpl(content, func() {})) - if err != nil { - s.handleRestError(response.WriteError(http.StatusInternalServerError, err)) - return - } - response.WriteHeader(http.StatusOK) - }). - Doc("produce a message"). - Metadata(restfulspec.KeyOpenAPITags, tags). - Operation("produceMessage"). - Reads(bytesSchema). - Param(tubeName)) - - ws.Route(ws.GET("/consume/{name}"). 
- To(func(request *restful.Request, response *restful.Response) { - name := request.PathParameter("name") - record, err := s.Manager.ConsumeEvent(name) - if err != nil { - s.handleRestError(response.WriteError(http.StatusInternalServerError, err)) - return - } - _, err = response.Write(record.GetPayload()) - s.handleRestError(err) - }). - Doc("consume a message"). - Metadata(restfulspec.KeyOpenAPITags, tags). - Operation("consumeMessage"). - Writes(bytesSchema). - Returns(http.StatusOK, "OK", bytesSchema). - Param(tubeName)) - - return ws -} diff --git a/src/config/global_config.rs b/src/config/global_config.rs new file mode 100644 index 00000000..b4f92edd --- /dev/null +++ b/src/config/global_config.rs @@ -0,0 +1,85 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use serde::{Deserialize, Serialize}; +use serde_yaml::Value; +use uuid::Uuid; + +use crate::config::log_config::LogConfig; +use crate::config::python_config::PythonConfig; +use crate::config::service_config::ServiceConfig; +use crate::config::wasm_config::WasmConfig; + +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct GlobalConfig { + pub service: ServiceConfig, + pub logging: LogConfig, + #[serde(default)] + pub python: PythonConfig, + #[serde(default)] + pub wasm: WasmConfig, + #[serde(default)] + pub state_storage: crate::config::storage::StateStorageConfig, + #[serde(default)] + pub task_storage: crate::config::storage::TaskStorageConfig, +} + +impl GlobalConfig { + pub fn from_cargo() -> Self { + let mut config = Self::default(); + config.service.version = env!("CARGO_PKG_VERSION").to_string(); + config.service.service_name = Uuid::new_v4().to_string(); + config + } + + pub fn cargo_version() -> &'static str { + env!("CARGO_PKG_VERSION") + } + + pub fn new() -> Self { + Self::default() + } + + pub fn from_yaml_value(value: Value) -> Result> { + let config: GlobalConfig = serde_yaml::from_value(value)?; + Ok(config) + } + + pub fn service_id(&self) -> &str { + &self.service.service_id + } + + pub fn port(&self) -> u16 { + self.service.port + } + + pub fn validate(&self) -> Result<(), String> { + if self.service.port == 0 { + return Err(format!("Invalid port: {}", self.service.port)); + } + Ok(()) + } + + pub fn load>( + path: Option

, + ) -> Result> { + let config_path = path + .map(|p| p.as_ref().to_path_buf()) + .unwrap_or_else(|| std::path::PathBuf::from("../../conf/config.yaml")); + + if config_path.exists() { + crate::config::load_global_config(&config_path) + } else { + Ok(Self::default()) + } + } +} diff --git a/src/config/loader.rs b/src/config/loader.rs new file mode 100644 index 00000000..b8bf845e --- /dev/null +++ b/src/config/loader.rs @@ -0,0 +1,30 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use serde_yaml::Value; +use std::fs; +use std::path::Path; + +/// Read configuration from YAML file +pub fn read_yaml_file>(path: P) -> Result> { + let content = fs::read_to_string(path)?; + let value: Value = serde_yaml::from_str(&content)?; + Ok(value) +} + +/// Load global configuration from file +pub fn load_global_config>( + path: P, +) -> Result> { + let value = read_yaml_file(path)?; + crate::config::GlobalConfig::from_yaml_value(value) +} diff --git a/src/config/log_config.rs b/src/config/log_config.rs new file mode 100644 index 00000000..ecc88c75 --- /dev/null +++ b/src/config/log_config.rs @@ -0,0 +1,34 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct LogConfig { + pub level: String, + pub format: String, + pub file_path: Option, + pub max_file_size: Option, + pub max_files: Option, +} + +impl Default for LogConfig { + fn default() -> Self { + Self { + level: "info".to_string(), + format: "json".to_string(), + file_path: Some("logs/app.log".to_string()), + max_file_size: Some(100), + max_files: Some(5), + } + } +} diff --git a/src/config/mod.rs b/src/config/mod.rs new file mode 100644 index 00000000..251c1f3c --- /dev/null +++ b/src/config/mod.rs @@ -0,0 +1,32 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +pub mod global_config; +pub mod loader; +pub mod log_config; +pub mod paths; +pub mod python_config; +pub mod service_config; +pub mod storage; +pub mod wasm_config; + +pub use global_config::GlobalConfig; +pub use loader::load_global_config; +pub use log_config::LogConfig; +#[allow(unused_imports)] +pub use paths::{ + ENV_CONF, ENV_HOME, find_config_file, get_app_log_path, get_conf_dir, get_data_dir, + get_log_path, get_logs_dir, get_project_root, get_python_cache_dir, get_python_cwasm_path, + get_python_wasm_path, get_state_dir, get_state_dir_for_base, get_task_dir, get_wasm_cache_dir, + resolve_path, +}; +pub use python_config::PythonConfig; diff --git a/src/config/paths.rs b/src/config/paths.rs new file mode 100644 index 00000000..937f6607 --- /dev/null +++ b/src/config/paths.rs @@ -0,0 +1,145 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::env; +use std::fs; +use std::path::PathBuf; +use std::sync::OnceLock; + +pub const ENV_HOME: &str = "FUNCTION_STREAM_HOME"; +pub const ENV_CONF: &str = "FUNCTION_STREAM_CONF"; + +static PROJECT_ROOT: OnceLock = OnceLock::new(); + +pub fn get_project_root() -> &'static PathBuf { + PROJECT_ROOT + .get_or_init(|| resolve_project_root().expect("CRITICAL: Failed to resolve project root")) +} + +fn resolve_project_root() -> std::io::Result { + if let Ok(home) = env::var(ENV_HOME) { + let path = PathBuf::from(&home); + return path.canonicalize().or(Ok(path)); + } + + if let Ok(manifest_dir) = env::var("CARGO_MANIFEST_DIR") { + return Ok(PathBuf::from(manifest_dir)); + } + + if let Ok(exe_path) = env::current_exe() { + let mut path = exe_path; + path.pop(); + if path.file_name().map_or(false, |n| n == "bin") { + path.pop(); + } + return Ok(path); + } + + env::current_dir() +} + +pub fn resolve_path(input_path: &str) -> PathBuf { + let path = PathBuf::from(input_path); + if path.is_absolute() { + path + } else { + get_project_root().join(path) + } +} + +fn to_absolute_path(input_path: &str) -> PathBuf { + resolve_path(input_path) +} + +pub fn find_config_file(config_name: &str) -> Option { + if let Ok(conf_env) = env::var(ENV_CONF) { + let path = to_absolute_path(&conf_env); + if path.is_file() { + return Some(path); + } + if path.is_dir() { + let full = path.join(config_name); + if full.exists() { + return Some(full); + } + } + } + + let search_paths = vec![ + get_conf_dir().join(config_name), + get_project_root().join(config_name), + ]; + + for path in search_paths { + if path.exists() { + return Some(path.canonicalize().unwrap_or(path)); + } + } + + None +} + +fn get_or_create_sub_dir(name: &str) -> PathBuf { + let dir = get_project_root().join(name); + if !dir.exists() { + let _ = fs::create_dir_all(&dir); + } + dir +} + +pub fn get_data_dir() -> PathBuf { + get_or_create_sub_dir("data") +} + +pub fn get_logs_dir() -> PathBuf { + 
get_or_create_sub_dir("logs") +} + +pub fn get_conf_dir() -> PathBuf { + get_or_create_sub_dir("conf") +} + +pub fn get_task_dir() -> PathBuf { + get_or_create_sub_dir("data/task") +} + +pub fn get_state_dir() -> PathBuf { + get_or_create_sub_dir("data/state") +} + +pub fn get_state_dir_for_base(base: &str) -> PathBuf { + resolve_path(base).join("state") +} + +pub fn get_app_log_path() -> PathBuf { + get_logs_dir().join("app.log") +} + +pub fn get_log_path(relative: &str) -> PathBuf { + get_logs_dir().join(relative) +} + +pub fn get_wasm_cache_dir() -> PathBuf { + get_or_create_sub_dir("data/cache/wasm-incremental") +} + +pub fn get_python_cache_dir() -> PathBuf { + get_or_create_sub_dir("data/cache/python-runner") +} + +pub fn get_python_wasm_path() -> PathBuf { + get_python_cache_dir().join("functionstream-python-runtime.wasm") +} + +pub fn get_python_cwasm_path() -> PathBuf { + get_python_cache_dir().join("functionstream-python-runtime.cwasm") +} diff --git a/src/config/python_config.rs b/src/config/python_config.rs new file mode 100644 index 00000000..9d539c5f --- /dev/null +++ b/src/config/python_config.rs @@ -0,0 +1,69 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use serde::{Deserialize, Serialize}; +use std::path::PathBuf; + +pub const DEFAULT_PYTHON_WASM_FILENAME: &str = "functionstream-python-runtime.wasm"; +pub const DEFAULT_PYTHON_CWASM_FILENAME: &str = "functionstream-python-runtime.cwasm"; + +fn default_python_wasm_path() -> String { + super::paths::get_python_wasm_path() + .to_string_lossy() + .to_string() +} + +fn default_python_cache_dir() -> String { + super::paths::get_python_cache_dir() + .to_string_lossy() + .to_string() +} + +fn default_true() -> bool { + true +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct PythonConfig { + #[serde(default = "default_python_wasm_path")] + pub wasm_path: String, + + #[serde(default = "default_python_cache_dir")] + pub cache_dir: String, + + #[serde(default = "default_true")] + pub enable_cache: bool, +} + +impl Default for PythonConfig { + fn default() -> Self { + Self { + wasm_path: default_python_wasm_path(), + cache_dir: default_python_cache_dir(), + enable_cache: true, + } + } +} + +impl PythonConfig { + pub fn wasm_path_buf(&self) -> PathBuf { + super::paths::resolve_path(&self.wasm_path) + } + + pub fn cache_dir_buf(&self) -> PathBuf { + super::paths::resolve_path(&self.cache_dir) + } + + pub fn cwasm_cache_path(&self) -> PathBuf { + super::paths::resolve_path(&self.cache_dir).join(DEFAULT_PYTHON_CWASM_FILENAME) + } +} diff --git a/src/config/service_config.rs b/src/config/service_config.rs new file mode 100644 index 00000000..4b58b2c1 --- /dev/null +++ b/src/config/service_config.rs @@ -0,0 +1,40 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ServiceConfig { + pub service_id: String, + pub service_name: String, + pub version: String, + pub host: String, + pub port: u16, + pub workers: Option, + pub worker_multiplier: Option, + pub debug: bool, +} + +impl Default for ServiceConfig { + fn default() -> Self { + Self { + service_id: "default-service".to_string(), + service_name: "function-stream".to_string(), + version: "0.1.0".to_string(), + host: "127.0.0.1".to_string(), + port: 8080, + workers: None, + worker_multiplier: Some(4), + debug: false, + } + } +} diff --git a/src/config/storage.rs b/src/config/storage.rs new file mode 100644 index 00000000..e5186648 --- /dev/null +++ b/src/config/storage.rs @@ -0,0 +1,120 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Storage Configuration - Storage configuration +// +// Defines configuration structures for state storage and task storage + +use serde::{Deserialize, Serialize}; + +/// State storage factory type +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +pub enum StateStorageType { + /// Memory storage + Memory, + /// RocksDB storage + RocksDB, +} + +/// RocksDB configuration options +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct RocksDBStorageConfig { + // Note: dir_name is no longer used, database is stored directly in {base_dir}/state/{task_name}-{time} directory + // Example: data/state/my_task-1234567890 + /// Maximum number of open files + pub max_open_files: Option, + /// Write buffer size (bytes) + pub write_buffer_size: Option, + /// Maximum number of write buffers + pub max_write_buffer_number: Option, + /// Target file size base (bytes) + pub target_file_size_base: Option, + /// Maximum bytes for level base (bytes) + pub max_bytes_for_level_base: Option, + // Note: Compression configuration is not currently supported, uses default none compression +} + +/// State storage configuration +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct StateStorageConfig { + /// Storage type + #[serde(default = "default_state_storage_type")] + pub storage_type: StateStorageType, + /// Base directory path (required for RocksDB) + /// Final path format: {base_dir}/state/{task_name}-{created_at} + /// Example: if base_dir is "data", task name is "my_task", created_at is 1234567890 + /// then the full path is: data/state/my_task-1234567890 + /// Default uses the data directory returned by find_or_create_data_dir() + #[serde(default = "default_base_dir")] + pub base_dir: Option, + /// RocksDB configuration (only used when storage_type is RocksDB) + #[serde(default)] + pub rocksdb: RocksDBStorageConfig, +} + +fn default_state_storage_type() -> StateStorageType { + StateStorageType::RocksDB +} 
+ +fn default_base_dir() -> Option { + // Default base directory is "data" (lowercase) + // In actual use, if not specified in config, should use the result of find_or_create_data_dir() + Some("data".to_string()) +} + +impl Default for StateStorageConfig { + fn default() -> Self { + Self { + storage_type: StateStorageType::RocksDB, + base_dir: default_base_dir(), + rocksdb: RocksDBStorageConfig::default(), + } + } +} + +/// Task storage type +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +pub enum TaskStorageType { + /// RocksDB storage + RocksDB, +} + +/// Task storage configuration +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct TaskStorageConfig { + /// Storage type + #[serde(default = "default_task_storage_type")] + pub storage_type: TaskStorageType, + /// Database path (optional, if None, uses default path `data/task/{task_name}`) + /// Default path format: `data/task/{task_name}` + /// Example: `data/task/my_task` + pub db_path: Option, + /// RocksDB configuration + #[serde(default)] + pub rocksdb: RocksDBStorageConfig, +} + +fn default_task_storage_type() -> TaskStorageType { + TaskStorageType::RocksDB +} + +impl Default for TaskStorageConfig { + fn default() -> Self { + Self { + storage_type: TaskStorageType::RocksDB, + db_path: None, + rocksdb: RocksDBStorageConfig::default(), + } + } +} diff --git a/src/config/wasm_config.rs b/src/config/wasm_config.rs new file mode 100644 index 00000000..f3d3a0f1 --- /dev/null +++ b/src/config/wasm_config.rs @@ -0,0 +1,47 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use serde::{Deserialize, Serialize}; + +fn default_wasm_cache_dir() -> String { + crate::config::paths::get_wasm_cache_dir() + .to_string_lossy() + .to_string() +} + +fn default_true() -> bool { + true +} + +fn default_max_cache_size() -> u64 { + 100 * 1024 * 1024 +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct WasmConfig { + #[serde(default = "default_wasm_cache_dir")] + pub cache_dir: String, + #[serde(default = "default_true")] + pub enable_cache: bool, + #[serde(default = "default_max_cache_size")] + pub max_cache_size: u64, +} + +impl Default for WasmConfig { + fn default() -> Self { + Self { + cache_dir: default_wasm_cache_dir(), + enable_cache: true, + max_cache_size: default_max_cache_size(), + } + } +} diff --git a/common/run.go b/src/coordinator/analyze/analysis.rs similarity index 63% rename from common/run.go rename to src/coordinator/analyze/analysis.rs index 99031f1f..6542112d 100644 --- a/common/run.go +++ b/src/coordinator/analyze/analysis.rs @@ -1,5 +1,3 @@ -// Copyright 2023 StreamNative, Inc. -// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -12,25 +10,19 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package common +use crate::coordinator::statement::Statement; -import ( - "io" - "log/slog" - "os" -) +#[derive(Debug)] +pub struct Analysis { + pub statement: Box, +} -func RunProcess(startProcess func() (io.Closer, error)) { - process, err := startProcess() - if err != nil { - slog.Error( - "Failed to start the process", - slog.Any("error", err), - ) - os.Exit(1) - } +impl Analysis { + pub fn new(statement: Box) -> Self { + Self { statement } + } - WaitUntilSignal( - process, - ) + pub fn statement(&self) -> &dyn Statement { + self.statement.as_ref() + } } diff --git a/src/coordinator/analyze/analyzer.rs b/src/coordinator/analyze/analyzer.rs new file mode 100644 index 00000000..30552191 --- /dev/null +++ b/src/coordinator/analyze/analyzer.rs @@ -0,0 +1,118 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use super::Analysis; +use crate::coordinator::execution_context::ExecutionContext; +use crate::coordinator::statement::{ + CreateFunction, CreatePythonFunction, DropFunction, ShowFunctions, StartFunction, Statement, + StatementVisitor, StatementVisitorContext, StatementVisitorResult, StopFunction, +}; +use std::fmt; + +#[derive(Debug, Clone)] +pub struct AnalyzeError { + pub message: String, +} + +impl AnalyzeError { + pub fn new(message: impl Into) -> Self { + Self { + message: message.into(), + } + } +} + +impl fmt::Display for AnalyzeError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "Analyze error: {}", self.message) + } +} + +impl std::error::Error for AnalyzeError {} + +/// Analyzer performs semantic analysis +pub struct Analyzer<'a> { + #[allow(dead_code)] + context: &'a ExecutionContext, +} + +impl<'a> Analyzer<'a> { + pub fn new(context: &'a ExecutionContext) -> Self { + Self { context } + } + + /// Analyze Statement and return Analysis + pub fn analyze(&self, stmt: &dyn Statement) -> Result { + let visitor_context = StatementVisitorContext::Empty; + let analyzed_stmt = match stmt.accept(self, &visitor_context) { + StatementVisitorResult::Analyze(result) => result, + _ => return Err(AnalyzeError::new("Analyzer should return Analyze result")), + }; + Ok(Analysis::new(analyzed_stmt)) + } +} + +impl StatementVisitor for Analyzer<'_> { + fn visit_create_function( + &self, + stmt: &CreateFunction, + _context: &StatementVisitorContext, + ) -> StatementVisitorResult { + // Function source is already validated during parsing (from_properties) + // So we just need to check if it exists + let _function_source = stmt.get_function_source(); + + // Note: name is read from config file, not from SQL statement + // So we don't validate name here - it will be validated when config file is read + StatementVisitorResult::Analyze(Box::new(stmt.clone())) + } + + fn visit_drop_function( + &self, + stmt: &DropFunction, + _context: 
&StatementVisitorContext, + ) -> StatementVisitorResult { + StatementVisitorResult::Analyze(Box::new(stmt.clone())) + } + + fn visit_start_function( + &self, + stmt: &StartFunction, + _context: &StatementVisitorContext, + ) -> StatementVisitorResult { + StatementVisitorResult::Analyze(Box::new(stmt.clone())) + } + + fn visit_stop_function( + &self, + stmt: &StopFunction, + _context: &StatementVisitorContext, + ) -> StatementVisitorResult { + StatementVisitorResult::Analyze(Box::new(stmt.clone())) + } + + fn visit_show_functions( + &self, + stmt: &ShowFunctions, + _context: &StatementVisitorContext, + ) -> StatementVisitorResult { + StatementVisitorResult::Analyze(Box::new(stmt.clone())) + } + + fn visit_create_python_function( + &self, + stmt: &CreatePythonFunction, + _context: &StatementVisitorContext, + ) -> StatementVisitorResult { + StatementVisitorResult::Analyze(Box::new(stmt.clone())) + } +} diff --git a/src/coordinator/analyze/mod.rs b/src/coordinator/analyze/mod.rs new file mode 100644 index 00000000..45d1b83b --- /dev/null +++ b/src/coordinator/analyze/mod.rs @@ -0,0 +1,17 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +pub mod analysis; +pub mod analyzer; + +pub use analysis::Analysis; +pub use analyzer::Analyzer; diff --git a/src/coordinator/coordinator.rs b/src/coordinator/coordinator.rs new file mode 100644 index 00000000..7f5d4dbb --- /dev/null +++ b/src/coordinator/coordinator.rs @@ -0,0 +1,131 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::time::Instant; + +use anyhow::{Context, Result}; + +use crate::coordinator::analyze::{Analysis, Analyzer}; +use crate::coordinator::dataset::ExecuteResult; +use crate::coordinator::execution::Executor; +use crate::coordinator::plan::{LogicalPlanVisitor, LogicalPlanner, PlanNode}; +use crate::coordinator::statement::Statement; +use crate::runtime::taskexecutor::TaskManager; + +use super::execution_context::ExecutionContext; + +pub struct Coordinator {} + +impl Coordinator { + pub fn new() -> Self { + Self {} + } + + pub fn execute(&self, stmt: &dyn Statement) -> ExecuteResult { + let start_time = Instant::now(); + let context = ExecutionContext::new(); + let execution_id = context.execution_id; + + match self.execute_pipeline(&context, stmt) { + Ok(result) => { + log::debug!( + "[{}] Execution completed in {}ms", + execution_id, + start_time.elapsed().as_millis() + ); + result + } + Err(e) => { + log::error!( + "[{}] Execution failed after {}ms. 
Error: {:#}", + execution_id, + start_time.elapsed().as_millis(), + e + ); + ExecuteResult::err(format!("Execution failed: {:#}", e)) + } + } + } + + fn execute_pipeline( + &self, + context: &ExecutionContext, + stmt: &dyn Statement, + ) -> Result { + let analysis = self.step_analyze(context, stmt)?; + let plan = self.step_build_logical_plan(&analysis)?; + let optimized_plan = self.step_optimize(&analysis, plan)?; + self.step_execute(optimized_plan) + } + + fn step_analyze(&self, context: &ExecutionContext, stmt: &dyn Statement) -> Result { + let start = Instant::now(); + let analyzer = Analyzer::new(context); + let result = analyzer + .analyze(stmt) + .map_err(|e| anyhow::anyhow!(e)) + .context("Analyzer phase failed"); + + log::debug!( + "[{}] Analyze phase finished in {}ms", + context.execution_id, + start.elapsed().as_millis() + ); + result + } + + fn step_build_logical_plan(&self, analysis: &Analysis) -> Result> { + let visitor = LogicalPlanVisitor::new(); + let plan = visitor.visit(analysis); + Ok(plan) + } + + fn step_optimize( + &self, + analysis: &Analysis, + plan: Box, + ) -> Result> { + let start = Instant::now(); + let planner = LogicalPlanner::new(); + let optimized = planner.optimize(plan, analysis); + + log::debug!( + "Optimizer phase finished in {}ms", + start.elapsed().as_millis() + ); + Ok(optimized) + } + + fn step_execute(&self, plan: Box) -> Result { + let start = Instant::now(); + let task_manager = match TaskManager::get() { + Ok(tm) => tm, + Err(e) => { + return Ok(ExecuteResult::err(format!( + "Failed to get TaskManager: {}", + e + ))); + } + }; + let executor = Executor::new(task_manager.clone()); + let result = executor + .execute(plan.as_ref()) + .map_err(|e| anyhow::anyhow!(e)) + .context("Executor phase failed"); + + log::debug!( + "Executor phase finished in {}ms", + start.elapsed().as_millis() + ); + result + } +} diff --git a/src/coordinator/dataset/data_set.rs b/src/coordinator/dataset/data_set.rs new file mode 100644 index 
00000000..23519cad --- /dev/null +++ b/src/coordinator/dataset/data_set.rs @@ -0,0 +1,34 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::sync::Arc; + +use arrow_array::RecordBatch; +use arrow_schema::Schema; + +/// Create an empty RecordBatch (0 columns, 0 rows). +pub fn empty_record_batch() -> RecordBatch { + RecordBatch::new_empty(Arc::new(Schema::empty())) +} + +/// DataSet interface: conversion to Arrow RecordBatch. +/// Supertrait `Any` allows downcasting `Arc` to concrete types (e.g. `ShowFunctionsResult`). +pub trait DataSet: Send + Sync + std::any::Any { + /// Convert to RecordBatch. + fn to_record_batch(&self) -> RecordBatch; +} + +impl DataSet for RecordBatch { + fn to_record_batch(&self) -> RecordBatch { + self.clone() + } +} diff --git a/src/coordinator/dataset/execute_result.rs b/src/coordinator/dataset/execute_result.rs new file mode 100644 index 00000000..08b3da7c --- /dev/null +++ b/src/coordinator/dataset/execute_result.rs @@ -0,0 +1,59 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +use std::fmt; +use std::sync::Arc; + +use super::DataSet; + +#[derive(Clone)] +pub struct ExecuteResult { + pub success: bool, + pub message: String, + pub data: Option>, +} + +impl fmt::Debug for ExecuteResult { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("ExecuteResult") + .field("success", &self.success) + .field("message", &self.message) + .field("data", &self.data.as_ref().map(|_| "...")) + .finish() + } +} + +impl ExecuteResult { + pub fn ok(message: impl Into) -> Self { + Self { + success: true, + message: message.into(), + data: None, + } + } + + pub fn ok_with_data(message: impl Into, data: impl DataSet + 'static) -> Self { + Self { + success: true, + message: message.into(), + data: Some(Arc::new(data)), + } + } + + pub fn err(message: impl Into) -> Self { + Self { + success: false, + message: message.into(), + data: None, + } + } +} diff --git a/src/coordinator/dataset/mod.rs b/src/coordinator/dataset/mod.rs new file mode 100644 index 00000000..b72613da --- /dev/null +++ b/src/coordinator/dataset/mod.rs @@ -0,0 +1,19 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +mod data_set; +mod execute_result; +mod show_functions_result; + +pub use data_set::{DataSet, empty_record_batch}; +pub use execute_result::ExecuteResult; +pub use show_functions_result::ShowFunctionsResult; diff --git a/src/coordinator/dataset/show_functions_result.rs b/src/coordinator/dataset/show_functions_result.rs new file mode 100644 index 00000000..c16edf6d --- /dev/null +++ b/src/coordinator/dataset/show_functions_result.rs @@ -0,0 +1,62 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::sync::Arc; + +use arrow_array::{RecordBatch, StringArray}; +use arrow_schema::{DataType, Field, Schema}; + +use super::DataSet; +use crate::storage::task::FunctionInfo; + +#[derive(Clone, Debug)] +pub struct ShowFunctionsResult { + functions: Vec, +} + +impl ShowFunctionsResult { + pub fn new(functions: Vec) -> Self { + Self { functions } + } + + pub fn functions(&self) -> &[FunctionInfo] { + &self.functions + } +} + +impl DataSet for ShowFunctionsResult { + fn to_record_batch(&self) -> RecordBatch { + let names: Vec<&str> = self.functions.iter().map(|f| f.name.as_str()).collect(); + let types: Vec<&str> = self + .functions + .iter() + .map(|f| f.task_type.as_str()) + .collect(); + let statuses: Vec<&str> = self.functions.iter().map(|f| f.status.as_str()).collect(); + + let schema = Arc::new(Schema::new(vec![ + Field::new("name", DataType::Utf8, false), + Field::new("task_type", DataType::Utf8, false), + Field::new("status", DataType::Utf8, false), + ])); + + RecordBatch::try_new( + schema, + vec![ + Arc::new(StringArray::from(names)), + Arc::new(StringArray::from(types)), + Arc::new(StringArray::from(statuses)), + ], + ) + .unwrap_or_else(|_| RecordBatch::new_empty(Arc::new(Schema::empty()))) + } +} diff --git a/src/coordinator/execution/executor.rs b/src/coordinator/execution/executor.rs new file mode 100644 index 00000000..2f7e000f --- /dev/null +++ b/src/coordinator/execution/executor.rs @@ -0,0 +1,211 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::coordinator::dataset::{ExecuteResult, ShowFunctionsResult, empty_record_batch}; +use crate::coordinator::plan::{ + CreateFunctionPlan, CreatePythonFunctionPlan, DropFunctionPlan, PlanNode, PlanVisitor, + PlanVisitorContext, PlanVisitorResult, ShowFunctionsPlan, StartFunctionPlan, StopFunctionPlan, +}; +use crate::coordinator::statement::{ConfigSource, FunctionSource}; +use crate::runtime::taskexecutor::TaskManager; +use std::sync::Arc; +use thiserror::Error; +use tracing::{debug, info}; + +#[derive(Error, Debug)] +pub enum ExecuteError { + #[error("Execution failed: {0}")] + Internal(String), + #[error("IO error during execution: {0}")] + Io(#[from] std::io::Error), + #[error("Task manager error: {0}")] + Task(String), + #[error("Validation error: {0}")] + Validation(String), +} + +pub struct Executor { + task_manager: Arc, +} + +impl Executor { + pub fn new(task_manager: Arc) -> Self { + Self { task_manager } + } + + pub fn execute(&self, plan: &dyn PlanNode) -> Result { + let timer = std::time::Instant::now(); + let context = PlanVisitorContext::new(); + + let visitor_result = plan.accept(self, &context); + + match visitor_result { + PlanVisitorResult::Execute(result) => { + let elapsed = timer.elapsed(); + debug!(target: "executor", elapsed_ms = elapsed.as_millis(), "Execution completed"); + result + } + } + } +} + +impl PlanVisitor for Executor { + fn visit_create_function( + &self, + plan: &CreateFunctionPlan, + _context: &PlanVisitorContext, + ) -> PlanVisitorResult { + let result = (|| -> Result { + let function_bytes = match &plan.function_source { + FunctionSource::Path(path) => std::fs::read(path).map_err(|e| { + ExecuteError::Validation(format!("Failed to read function at {}: {}", path, e)) + })?, + FunctionSource::Bytes(bytes) => bytes.clone(), + }; + + let config_bytes = match &plan.config_source { + Some(ConfigSource::Path(path)) => 
std::fs::read(path).map_err(|e| { + ExecuteError::Validation(format!("Failed to read config at {}: {}", path, e)) + })?, + Some(ConfigSource::Bytes(bytes)) => bytes.clone(), + None => { + return Err(ExecuteError::Validation( + "Configuration bytes required for function creation".into(), + )); + } + }; + + info!(config_size = config_bytes.len(), "Registering Wasm task"); + self.task_manager + .register_task(&config_bytes, &function_bytes) + .map_err(|e| ExecuteError::Task(format!("Registration failed: {:?}", e)))?; + + Ok(ExecuteResult::ok_with_data( + "Function registered successfully", + empty_record_batch(), + )) + })(); + + PlanVisitorResult::Execute(result) + } + + fn visit_drop_function( + &self, + plan: &DropFunctionPlan, + _context: &PlanVisitorContext, + ) -> PlanVisitorResult { + let result = (|| -> Result { + let status = self + .task_manager + .get_task_status(&plan.name) + .map_err(|e| ExecuteError::Task(format!("Task discovery failed: {}", e)))?; + + if status.is_running() { + return Err(ExecuteError::Validation(format!( + "Task '{}' is currently running. 
Use FORCE to drop.", + plan.name + ))); + } + + self.task_manager + .remove_task(&plan.name) + .map_err(|e| ExecuteError::Task(format!("Removal failed: {}", e)))?; + + Ok(ExecuteResult::ok_with_data( + format!("Function '{}' dropped", plan.name), + empty_record_batch(), + )) + })(); + + PlanVisitorResult::Execute(result) + } + + fn visit_start_function( + &self, + plan: &StartFunctionPlan, + _context: &PlanVisitorContext, + ) -> PlanVisitorResult { + let result = self + .task_manager + .start_task(&plan.name) + .map(|_| { + ExecuteResult::ok_with_data( + format!("Function '{}' started", plan.name), + empty_record_batch(), + ) + }) + .map_err(|e| ExecuteError::Task(e.to_string())); + + PlanVisitorResult::Execute(result) + } + + fn visit_show_functions( + &self, + _plan: &ShowFunctionsPlan, + _context: &PlanVisitorContext, + ) -> PlanVisitorResult { + let result = (|| -> Result { + let functions = self.task_manager.list_all_functions(); + + Ok(ExecuteResult::ok_with_data( + format!("Found {} task(s)", functions.len()), + ShowFunctionsResult::new(functions), + )) + })(); + + PlanVisitorResult::Execute(result) + } + + fn visit_create_python_function( + &self, + plan: &CreatePythonFunctionPlan, + _context: &PlanVisitorContext, + ) -> PlanVisitorResult { + let result = (|| -> Result { + let modules: Vec<(String, Vec)> = plan + .modules + .iter() + .map(|m| (m.name.clone(), m.bytes.clone())) + .collect(); + + self.task_manager + .register_python_task(plan.config_content.as_bytes(), &modules) + .map_err(|e| ExecuteError::Task(format!("Python registration failed: {}", e)))?; + + Ok(ExecuteResult::ok_with_data( + format!("Python function '{}' deployed", plan.class_name), + empty_record_batch(), + )) + })(); + + PlanVisitorResult::Execute(result) + } + + fn visit_stop_function( + &self, + plan: &StopFunctionPlan, + _context: &PlanVisitorContext, + ) -> PlanVisitorResult { + let result = self + .task_manager + .stop_task(&plan.name) + .map(|_| { + ExecuteResult::ok_with_data( 
+ format!("Function '{}' stopped", plan.name), + empty_record_batch(), + ) + }) + .map_err(|e| ExecuteError::Task(e.to_string())); + + PlanVisitorResult::Execute(result) + } +} diff --git a/src/coordinator/execution/mod.rs b/src/coordinator/execution/mod.rs new file mode 100644 index 00000000..c0890a88 --- /dev/null +++ b/src/coordinator/execution/mod.rs @@ -0,0 +1,15 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +mod executor; + +pub use executor::{ExecuteError, Executor}; diff --git a/src/coordinator/execution_context.rs b/src/coordinator/execution_context.rs new file mode 100644 index 00000000..41e3f106 --- /dev/null +++ b/src/coordinator/execution_context.rs @@ -0,0 +1,55 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::sync::atomic::{AtomicU64, Ordering}; +use std::time::{Duration, Instant}; + +static EXECUTION_ID_GENERATOR: AtomicU64 = AtomicU64::new(1); + +#[derive(Debug)] +pub struct ExecutionContext { + pub execution_id: u64, + pub start_time: Instant, + pub timeout: Duration, +} + +impl ExecutionContext { + pub fn new() -> Self { + Self { + execution_id: EXECUTION_ID_GENERATOR.fetch_add(1, Ordering::SeqCst), + start_time: Instant::now(), + timeout: Duration::from_secs(30), + } + } + + pub fn set_timeout(&mut self, timeout: Duration) { + self.timeout = timeout; + } + + pub fn elapsed(&self) -> Duration { + self.start_time.elapsed() + } + + pub fn is_timeout(&self) -> bool { + self.elapsed() >= self.timeout + } + + pub fn remaining_timeout(&self) -> Duration { + self.timeout.saturating_sub(self.elapsed()) + } +} + +impl Default for ExecutionContext { + fn default() -> Self { + Self::new() + } +} diff --git a/src/coordinator/mod.rs b/src/coordinator/mod.rs new file mode 100644 index 00000000..b07de414 --- /dev/null +++ b/src/coordinator/mod.rs @@ -0,0 +1,26 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +mod analyze; +mod coordinator; +mod dataset; +mod execution; +mod execution_context; +mod plan; +mod statement; + +pub use coordinator::Coordinator; +pub use dataset::{DataSet, ShowFunctionsResult}; +pub use statement::{ + CreateFunction, CreatePythonFunction, DropFunction, PythonModule, ShowFunctions, StartFunction, + Statement, StopFunction, +}; diff --git a/src/coordinator/plan/create_function_plan.rs b/src/coordinator/plan/create_function_plan.rs new file mode 100644 index 00000000..1ec72675 --- /dev/null +++ b/src/coordinator/plan/create_function_plan.rs @@ -0,0 +1,42 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use super::{PlanNode, PlanVisitor, PlanVisitorContext, PlanVisitorResult}; +use crate::coordinator::statement::{ConfigSource, FunctionSource}; +use std::collections::HashMap; + +#[derive(Debug, Clone)] +pub struct CreateFunctionPlan { + pub function_source: FunctionSource, + pub config_source: Option, + pub properties: HashMap, +} + +impl CreateFunctionPlan { + pub fn new( + function_source: FunctionSource, + config_source: Option, + properties: HashMap, + ) -> Self { + Self { + function_source, + config_source, + properties, + } + } +} + +impl PlanNode for CreateFunctionPlan { + fn accept(&self, visitor: &dyn PlanVisitor, context: &PlanVisitorContext) -> PlanVisitorResult { + visitor.visit_create_function(self, context) + } +} diff --git a/src/coordinator/plan/create_python_function_plan.rs b/src/coordinator/plan/create_python_function_plan.rs new file mode 100644 index 00000000..7591e1bd --- /dev/null +++ b/src/coordinator/plan/create_python_function_plan.rs @@ -0,0 +1,37 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use super::{PlanNode, PlanVisitor, PlanVisitorContext, PlanVisitorResult}; +use crate::coordinator::statement::PythonModule; + +#[derive(Debug, Clone)] +pub struct CreatePythonFunctionPlan { + pub class_name: String, + pub modules: Vec, + pub config_content: String, +} + +impl CreatePythonFunctionPlan { + pub fn new(class_name: String, modules: Vec, config_content: String) -> Self { + Self { + class_name, + modules, + config_content, + } + } +} + +impl PlanNode for CreatePythonFunctionPlan { + fn accept(&self, visitor: &dyn PlanVisitor, context: &PlanVisitorContext) -> PlanVisitorResult { + visitor.visit_create_python_function(self, context) + } +} diff --git a/src/coordinator/plan/drop_function_plan.rs b/src/coordinator/plan/drop_function_plan.rs new file mode 100644 index 00000000..5af51ed9 --- /dev/null +++ b/src/coordinator/plan/drop_function_plan.rs @@ -0,0 +1,30 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use super::{PlanNode, PlanVisitor, PlanVisitorContext, PlanVisitorResult}; + +#[derive(Debug, Clone)] +pub struct DropFunctionPlan { + pub name: String, +} + +impl DropFunctionPlan { + pub fn new(name: String) -> Self { + Self { name } + } +} + +impl PlanNode for DropFunctionPlan { + fn accept(&self, visitor: &dyn PlanVisitor, context: &PlanVisitorContext) -> PlanVisitorResult { + visitor.visit_drop_function(self, context) + } +} diff --git a/src/coordinator/plan/logical_plan_visitor.rs b/src/coordinator/plan/logical_plan_visitor.rs new file mode 100644 index 00000000..536fec37 --- /dev/null +++ b/src/coordinator/plan/logical_plan_visitor.rs @@ -0,0 +1,109 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::coordinator::analyze::analysis::Analysis; +use crate::coordinator::plan::{ + CreateFunctionPlan, CreatePythonFunctionPlan, DropFunctionPlan, PlanNode, ShowFunctionsPlan, + StartFunctionPlan, StopFunctionPlan, +}; +use crate::coordinator::statement::{ + CreateFunction, CreatePythonFunction, DropFunction, ShowFunctions, StartFunction, + StatementVisitor, StatementVisitorContext, StatementVisitorResult, StopFunction, +}; + +#[derive(Debug, Default)] +pub struct LogicalPlanVisitor; + +impl LogicalPlanVisitor { + pub fn new() -> Self { + Self + } + + pub fn visit(&self, analysis: &Analysis) -> Box { + let context = StatementVisitorContext::Empty; + let stmt = analysis.statement(); + + let result = stmt.accept(self, &context); + + match result { + StatementVisitorResult::Plan(plan) => plan, + _ => panic!("LogicalPlanVisitor should return Plan"), + } + } +} + +impl StatementVisitor for LogicalPlanVisitor { + fn visit_create_function( + &self, + stmt: &CreateFunction, + _context: &StatementVisitorContext, + ) -> StatementVisitorResult { + let function_source = stmt.get_function_source().clone(); + let config_source = stmt.get_config_source().cloned(); + let extra_props = stmt.get_extra_properties().clone(); + + // Name will be read from config file during execution + StatementVisitorResult::Plan(Box::new(CreateFunctionPlan::new( + function_source, + config_source, + extra_props, + ))) + } + + fn visit_drop_function( + &self, + stmt: &DropFunction, + _context: &StatementVisitorContext, + ) -> StatementVisitorResult { + StatementVisitorResult::Plan(Box::new(DropFunctionPlan::new(stmt.name.clone()))) + } + + fn visit_start_function( + &self, + stmt: &StartFunction, + _context: &StatementVisitorContext, + ) -> StatementVisitorResult { + StatementVisitorResult::Plan(Box::new(StartFunctionPlan::new(stmt.name.clone()))) + } + + fn visit_stop_function( + &self, + stmt: &StopFunction, + _context: &StatementVisitorContext, + ) -> StatementVisitorResult { + 
StatementVisitorResult::Plan(Box::new(StopFunctionPlan::new(stmt.name.clone()))) + } + + fn visit_show_functions( + &self, + _stmt: &ShowFunctions, + _context: &StatementVisitorContext, + ) -> StatementVisitorResult { + StatementVisitorResult::Plan(Box::new(ShowFunctionsPlan::new())) + } + + fn visit_create_python_function( + &self, + stmt: &CreatePythonFunction, + _context: &StatementVisitorContext, + ) -> StatementVisitorResult { + let class_name = stmt.get_class_name().to_string(); + let modules = stmt.get_modules().to_vec(); + let config_content = stmt.get_config_content().to_string(); + + StatementVisitorResult::Plan(Box::new(CreatePythonFunctionPlan::new( + class_name, + modules, + config_content, + ))) + } +} diff --git a/src/coordinator/plan/mod.rs b/src/coordinator/plan/mod.rs new file mode 100644 index 00000000..9aa403b5 --- /dev/null +++ b/src/coordinator/plan/mod.rs @@ -0,0 +1,37 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +mod create_function_plan; +mod create_python_function_plan; +mod drop_function_plan; +mod logical_plan_visitor; +mod optimizer; +mod show_functions_plan; +mod start_function_plan; +mod stop_function_plan; +mod visitor; + +pub use create_function_plan::CreateFunctionPlan; +pub use create_python_function_plan::CreatePythonFunctionPlan; +pub use drop_function_plan::DropFunctionPlan; +pub use logical_plan_visitor::LogicalPlanVisitor; +pub use optimizer::LogicalPlanner; +pub use show_functions_plan::ShowFunctionsPlan; +pub use start_function_plan::StartFunctionPlan; +pub use stop_function_plan::StopFunctionPlan; +pub use visitor::{PlanVisitor, PlanVisitorContext, PlanVisitorResult}; + +use std::fmt; + +pub trait PlanNode: fmt::Debug + Send + Sync { + fn accept(&self, visitor: &dyn PlanVisitor, context: &PlanVisitorContext) -> PlanVisitorResult; +} diff --git a/src/coordinator/plan/optimizer.rs b/src/coordinator/plan/optimizer.rs new file mode 100644 index 00000000..e6294072 --- /dev/null +++ b/src/coordinator/plan/optimizer.rs @@ -0,0 +1,59 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::coordinator::analyze::Analysis; +use crate::coordinator::plan::PlanNode; +use std::fmt; + +pub trait PlanOptimizer: fmt::Debug + Send + Sync { + fn optimize(&self, plan: Box, analysis: &Analysis) -> Box; + + fn name(&self) -> &str; +} + +#[derive(Debug)] +pub struct LogicalPlanner { + optimizers: Vec>, +} + +impl LogicalPlanner { + pub fn new() -> Self { + Self { + optimizers: Vec::new(), + } + } + + pub fn with_optimizers(optimizers: Vec>) -> Self { + Self { optimizers } + } + + pub fn add_optimizer(&mut self, optimizer: Box) { + self.optimizers.push(optimizer); + } + + pub fn optimize(&self, plan: Box, analysis: &Analysis) -> Box { + let mut optimized_plan = plan; + + for optimizer in &self.optimizers { + log::debug!("Applying optimizer: {}", optimizer.name()); + optimized_plan = optimizer.optimize(optimized_plan, analysis); + } + + optimized_plan + } +} + +impl Default for LogicalPlanner { + fn default() -> Self { + Self::new() + } +} diff --git a/src/coordinator/plan/show_functions_plan.rs b/src/coordinator/plan/show_functions_plan.rs new file mode 100644 index 00000000..e1046401 --- /dev/null +++ b/src/coordinator/plan/show_functions_plan.rs @@ -0,0 +1,28 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use super::{PlanNode, PlanVisitor, PlanVisitorContext, PlanVisitorResult}; + +#[derive(Debug, Clone, Default)] +pub struct ShowFunctionsPlan {} + +impl ShowFunctionsPlan { + pub fn new() -> Self { + Self {} + } +} + +impl PlanNode for ShowFunctionsPlan { + fn accept(&self, visitor: &dyn PlanVisitor, context: &PlanVisitorContext) -> PlanVisitorResult { + visitor.visit_show_functions(self, context) + } +} diff --git a/src/coordinator/plan/start_function_plan.rs b/src/coordinator/plan/start_function_plan.rs new file mode 100644 index 00000000..b1c27125 --- /dev/null +++ b/src/coordinator/plan/start_function_plan.rs @@ -0,0 +1,30 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use super::{PlanNode, PlanVisitor, PlanVisitorContext, PlanVisitorResult}; + +#[derive(Debug, Clone)] +pub struct StartFunctionPlan { + pub name: String, +} + +impl StartFunctionPlan { + pub fn new(name: String) -> Self { + Self { name } + } +} + +impl PlanNode for StartFunctionPlan { + fn accept(&self, visitor: &dyn PlanVisitor, context: &PlanVisitorContext) -> PlanVisitorResult { + visitor.visit_start_function(self, context) + } +} diff --git a/src/coordinator/plan/stop_function_plan.rs b/src/coordinator/plan/stop_function_plan.rs new file mode 100644 index 00000000..8f1487e9 --- /dev/null +++ b/src/coordinator/plan/stop_function_plan.rs @@ -0,0 +1,38 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use super::{PlanNode, PlanVisitor, PlanVisitorContext, PlanVisitorResult}; + +#[derive(Debug, Clone)] +pub struct StopFunctionPlan { + pub name: String, + pub graceful: bool, +} + +impl StopFunctionPlan { + pub fn new(name: String) -> Self { + Self { + name, + graceful: true, + } + } + + pub fn with_graceful(name: String, graceful: bool) -> Self { + Self { name, graceful } + } +} + +impl PlanNode for StopFunctionPlan { + fn accept(&self, visitor: &dyn PlanVisitor, context: &PlanVisitorContext) -> PlanVisitorResult { + visitor.visit_stop_function(self, context) + } +} diff --git a/src/coordinator/plan/visitor.rs b/src/coordinator/plan/visitor.rs new file mode 100644 index 00000000..44059c67 --- /dev/null +++ b/src/coordinator/plan/visitor.rs @@ -0,0 +1,87 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::{ + CreateFunctionPlan, CreatePythonFunctionPlan, DropFunctionPlan, ShowFunctionsPlan, + StartFunctionPlan, StopFunctionPlan, +}; + +/// Context passed to PlanVisitor methods +/// +/// This context can be extended in the future to include additional information +/// needed by visitors, such as execution environment, configuration, etc. 
+#[derive(Debug, Clone, Default)] +pub struct PlanVisitorContext { + // Future: Add fields as needed, e.g.: + // pub execution_env: Option, + // pub config: Option, +} + +impl PlanVisitorContext { + pub fn new() -> Self { + Self::default() + } +} + +use crate::coordinator::dataset::ExecuteResult; +use crate::coordinator::execution::ExecuteError; + +/// Result returned by PlanVisitor methods +/// +/// This enum represents all possible return types from PlanVisitor implementations. +/// Different visitors can return different types, which are wrapped in this enum. +#[derive(Debug)] +pub enum PlanVisitorResult { + /// Execute result (from Executor) + Execute(Result), + // Future: Add more result variants as needed, e.g.: + // Optimize(BoxedPlanNode), + // Analyze(Analysis), +} + +pub trait PlanVisitor { + fn visit_create_function( + &self, + plan: &CreateFunctionPlan, + context: &PlanVisitorContext, + ) -> PlanVisitorResult; + + fn visit_drop_function( + &self, + plan: &DropFunctionPlan, + context: &PlanVisitorContext, + ) -> PlanVisitorResult; + + fn visit_start_function( + &self, + plan: &StartFunctionPlan, + context: &PlanVisitorContext, + ) -> PlanVisitorResult; + + fn visit_stop_function( + &self, + plan: &StopFunctionPlan, + context: &PlanVisitorContext, + ) -> PlanVisitorResult; + + fn visit_show_functions( + &self, + plan: &ShowFunctionsPlan, + context: &PlanVisitorContext, + ) -> PlanVisitorResult; + + fn visit_create_python_function( + &self, + plan: &CreatePythonFunctionPlan, + context: &PlanVisitorContext, + ) -> PlanVisitorResult; +} diff --git a/src/coordinator/statement/create_function.rs b/src/coordinator/statement/create_function.rs new file mode 100644 index 00000000..997a67e8 --- /dev/null +++ b/src/coordinator/statement/create_function.rs @@ -0,0 +1,137 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::{Statement, StatementVisitor, StatementVisitorContext, StatementVisitorResult}; +use std::collections::HashMap; + +/// Source of function data (either file path or bytes) +#[derive(Debug, Clone)] +pub enum FunctionSource { + Path(String), + Bytes(Vec), +} + +/// Source of config data (either file path or bytes) +#[derive(Debug, Clone)] +pub enum ConfigSource { + Path(String), + Bytes(Vec), +} + +#[derive(Debug, Clone)] +pub struct CreateFunction { + pub function_source: FunctionSource, + pub config_source: Option, + pub properties: HashMap, +} + +impl CreateFunction { + pub const PROP_FUNCTION_PATH: &'static str = "function_path"; + + pub const PROP_CONFIG_PATH: &'static str = "config_path"; + + pub fn from_bytes(function_bytes: Vec, config_bytes: Option>) -> Self { + Self { + function_source: FunctionSource::Bytes(function_bytes), + config_source: config_bytes.map(ConfigSource::Bytes), + properties: HashMap::new(), + } + } + + pub fn from_properties(properties: HashMap) -> Result { + let function_source = Self::parse_function_path(&properties)?; + let config_source = Self::parse_config_path(&properties); + let extra_props = Self::extract_extra_properties(&properties); + + Ok(Self { + function_source, + config_source, + properties: extra_props, + }) + } + + /// Parse function path from properties (SQL only, Path mode) + fn parse_function_path(properties: &HashMap) -> Result { + // SQL only supports function_path (file path), not function (bytes) + if let Some(path) = Self::get_property_ci(properties, Self::PROP_FUNCTION_PATH) { + 
return Ok(FunctionSource::Path(path)); + } + + Err(format!( + "Missing required property '{}' (case-insensitive). SQL only supports path mode, not bytes mode.", + Self::PROP_FUNCTION_PATH + )) + } + + /// Parse config path from properties (SQL only, Path mode) + fn parse_config_path(properties: &HashMap) -> Option { + // SQL only supports config_path (file path), not config (bytes) + if let Some(path) = Self::get_property_ci(properties, Self::PROP_CONFIG_PATH) { + return Some(ConfigSource::Path(path)); + } + + None + } + + /// Extract extra properties (excluding function/config related properties) + fn extract_extra_properties(properties: &HashMap) -> HashMap { + let mut extra_props = properties.clone(); + // Remove function_path and config_path properties (case-insensitive) + let keys_to_remove: Vec = extra_props + .keys() + .filter(|k| { + let k_lower = k.to_lowercase(); + k_lower == Self::PROP_FUNCTION_PATH || k_lower == Self::PROP_CONFIG_PATH + }) + .cloned() + .collect(); + for key in keys_to_remove { + extra_props.remove(&key); + } + extra_props + } + + /// Find property value by case-insensitive key + fn get_property_ci(properties: &HashMap, key: &str) -> Option { + let key_lower = key.to_lowercase(); + for (k, v) in properties { + if k.to_lowercase() == key_lower { + return Some(v.clone()); + } + } + None + } + + /// Get function source + pub fn get_function_source(&self) -> &FunctionSource { + &self.function_source + } + + /// Get config source + pub fn get_config_source(&self) -> Option<&ConfigSource> { + self.config_source.as_ref() + } + + /// Get extra properties + pub fn get_extra_properties(&self) -> &HashMap { + &self.properties + } +} +impl Statement for CreateFunction { + fn accept( + &self, + visitor: &dyn StatementVisitor, + context: &StatementVisitorContext, + ) -> StatementVisitorResult { + visitor.visit_create_function(self, context) + } +} diff --git a/src/coordinator/statement/create_python_function.rs 
b/src/coordinator/statement/create_python_function.rs new file mode 100644 index 00000000..378490e1 --- /dev/null +++ b/src/coordinator/statement/create_python_function.rs @@ -0,0 +1,59 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::{Statement, StatementVisitor, StatementVisitorContext, StatementVisitorResult}; + +/// Module information for Python function execution +#[derive(Debug, Clone)] +pub struct PythonModule { + pub name: String, + pub bytes: Vec, +} + +#[derive(Debug, Clone)] +pub struct CreatePythonFunction { + pub class_name: String, + pub modules: Vec, + pub config_content: String, +} + +impl CreatePythonFunction { + pub fn new(class_name: String, modules: Vec, config_content: String) -> Self { + Self { + class_name, + modules, + config_content, + } + } + + pub fn get_class_name(&self) -> &str { + &self.class_name + } + + pub fn get_modules(&self) -> &[PythonModule] { + &self.modules + } + + pub fn get_config_content(&self) -> &str { + &self.config_content + } +} + +impl Statement for CreatePythonFunction { + fn accept( + &self, + visitor: &dyn StatementVisitor, + context: &StatementVisitorContext, + ) -> StatementVisitorResult { + visitor.visit_create_python_function(self, context) + } +} diff --git a/src/coordinator/statement/drop_function.rs b/src/coordinator/statement/drop_function.rs new file mode 100644 index 00000000..bed85782 --- /dev/null +++ b/src/coordinator/statement/drop_function.rs @@ -0,0 +1,33 @@ +// Licensed under 
the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::{Statement, StatementVisitor, StatementVisitorContext, StatementVisitorResult}; + +#[derive(Debug, Clone)] +pub struct DropFunction { + pub name: String, +} + +impl DropFunction { + pub fn new(name: String) -> Self { + Self { name } + } +} +impl Statement for DropFunction { + fn accept( + &self, + visitor: &dyn StatementVisitor, + context: &StatementVisitorContext, + ) -> StatementVisitorResult { + visitor.visit_drop_function(self, context) + } +} diff --git a/src/coordinator/statement/mod.rs b/src/coordinator/statement/mod.rs new file mode 100644 index 00000000..f887209c --- /dev/null +++ b/src/coordinator/statement/mod.rs @@ -0,0 +1,37 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +mod create_function; +mod create_python_function; +mod drop_function; +mod show_functions; +mod start_function; +mod stop_function; +mod visitor; + +pub use create_function::{ConfigSource, CreateFunction, FunctionSource}; +pub use create_python_function::{CreatePythonFunction, PythonModule}; +pub use drop_function::DropFunction; +pub use show_functions::ShowFunctions; +pub use start_function::StartFunction; +pub use stop_function::StopFunction; +pub use visitor::{StatementVisitor, StatementVisitorContext, StatementVisitorResult}; + +use std::fmt; + +pub trait Statement: fmt::Debug + Send + Sync { + fn accept( + &self, + visitor: &dyn StatementVisitor, + context: &StatementVisitorContext, + ) -> StatementVisitorResult; +} diff --git a/src/coordinator/statement/show_functions.rs b/src/coordinator/statement/show_functions.rs new file mode 100644 index 00000000..b31983e6 --- /dev/null +++ b/src/coordinator/statement/show_functions.rs @@ -0,0 +1,32 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use super::{Statement, StatementVisitor, StatementVisitorContext, StatementVisitorResult}; + +#[derive(Debug, Clone, Default)] +pub struct ShowFunctions; + +impl ShowFunctions { + pub fn new() -> Self { + Self + } +} + +impl Statement for ShowFunctions { + fn accept( + &self, + visitor: &dyn StatementVisitor, + context: &StatementVisitorContext, + ) -> StatementVisitorResult { + visitor.visit_show_functions(self, context) + } +} diff --git a/src/coordinator/statement/start_function.rs b/src/coordinator/statement/start_function.rs new file mode 100644 index 00000000..fe2c7861 --- /dev/null +++ b/src/coordinator/statement/start_function.rs @@ -0,0 +1,34 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use super::{Statement, StatementVisitor, StatementVisitorContext, StatementVisitorResult}; + +#[derive(Debug, Clone)] +pub struct StartFunction { + pub name: String, +} + +impl StartFunction { + pub fn new(name: String) -> Self { + Self { name } + } +} + +impl Statement for StartFunction { + fn accept( + &self, + visitor: &dyn StatementVisitor, + context: &StatementVisitorContext, + ) -> StatementVisitorResult { + visitor.visit_start_function(self, context) + } +} diff --git a/src/coordinator/statement/stop_function.rs b/src/coordinator/statement/stop_function.rs new file mode 100644 index 00000000..ee48b378 --- /dev/null +++ b/src/coordinator/statement/stop_function.rs @@ -0,0 +1,34 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use super::{Statement, StatementVisitor, StatementVisitorContext, StatementVisitorResult}; + +#[derive(Debug, Clone)] +pub struct StopFunction { + pub name: String, +} + +impl StopFunction { + pub fn new(name: String) -> Self { + Self { name } + } +} + +impl Statement for StopFunction { + fn accept( + &self, + visitor: &dyn StatementVisitor, + context: &StatementVisitorContext, + ) -> StatementVisitorResult { + visitor.visit_stop_function(self, context) + } +} diff --git a/src/coordinator/statement/visitor.rs b/src/coordinator/statement/visitor.rs new file mode 100644 index 00000000..13ce2cfc --- /dev/null +++ b/src/coordinator/statement/visitor.rs @@ -0,0 +1,90 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::{ + CreateFunction, CreatePythonFunction, DropFunction, ShowFunctions, StartFunction, StopFunction, +}; +use crate::coordinator::plan::PlanNode; +use crate::coordinator::statement::Statement; + +/// Context passed to StatementVisitor methods +/// +/// This enum can be extended in the future to include additional context variants +/// needed by different visitors, such as analysis context, execution context, etc. 
+#[derive(Debug, Clone, Default)] +pub enum StatementVisitorContext { + /// Empty context (default) + #[default] + Empty, + // Future: Add more context variants as needed, e.g.: + // Analyze(AnalyzeContext), + // Execute(ExecuteContext), +} + +impl StatementVisitorContext { + pub fn new() -> Self { + Self::default() + } +} + +/// Result returned by StatementVisitor methods +/// +/// This enum represents all possible return types from StatementVisitor implementations. +/// Different visitors can return different types, which are wrapped in this enum. +#[derive(Debug)] +pub enum StatementVisitorResult { + /// Statement (from Analyzer) + Analyze(Box), + + /// Plan node result (from LogicalPlanVisitor) + Plan(Box), + // Future: Add more result variants as needed, e.g.: + // Execute(ExecuteResult), +} + +pub trait StatementVisitor { + fn visit_create_function( + &self, + stmt: &CreateFunction, + context: &StatementVisitorContext, + ) -> StatementVisitorResult; + + fn visit_drop_function( + &self, + stmt: &DropFunction, + context: &StatementVisitorContext, + ) -> StatementVisitorResult; + + fn visit_start_function( + &self, + stmt: &StartFunction, + context: &StatementVisitorContext, + ) -> StatementVisitorResult; + + fn visit_stop_function( + &self, + stmt: &StopFunction, + context: &StatementVisitorContext, + ) -> StatementVisitorResult; + + fn visit_show_functions( + &self, + stmt: &ShowFunctions, + context: &StatementVisitorContext, + ) -> StatementVisitorResult; + + fn visit_create_python_function( + &self, + stmt: &CreatePythonFunction, + context: &StatementVisitorContext, + ) -> StatementVisitorResult; +} diff --git a/src/lib.rs b/src/lib.rs new file mode 100644 index 00000000..b4d0c8e8 --- /dev/null +++ b/src/lib.rs @@ -0,0 +1,21 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Library crate for function-stream + +pub mod config; +pub mod coordinator; +pub mod logging; +pub mod runtime; +pub mod server; +pub mod sql; +pub mod storage; diff --git a/src/logging/mod.rs b/src/logging/mod.rs new file mode 100644 index 00000000..f0a7b881 --- /dev/null +++ b/src/logging/mod.rs @@ -0,0 +1,68 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::config::{LogConfig, get_app_log_path, get_log_path, get_logs_dir}; +use anyhow::Result; +use std::fs::OpenOptions; +use std::path::{Path, PathBuf}; +use tracing_subscriber::{EnvFilter, Registry, fmt, layer::SubscriberExt, util::SubscriberInitExt}; + +pub fn init_logging(config: &LogConfig) -> Result<()> { + let (log_dir, log_file) = if let Some(ref file_path) = config.file_path { + let path = PathBuf::from(file_path); + if path.is_absolute() { + let dir = path + .parent() + .unwrap_or_else(|| Path::new("logs")) + .to_path_buf(); + (dir, path) + } else { + let full_path = get_log_path(file_path); + let dir = full_path.parent().unwrap_or(&get_logs_dir()).to_path_buf(); + (dir, full_path) + } + } else { + let file = get_app_log_path(); + (get_logs_dir(), file) + }; + + std::fs::create_dir_all(&log_dir)?; + + let log_level = config.level.parse::().unwrap_or_else(|_| { + EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info")) + }); + + let file = OpenOptions::new() + .create(true) + .append(true) + .open(&log_file)?; + + let (non_blocking, _guard) = tracing_appender::non_blocking(file); + + let subscriber = Registry::default() + .with(log_level) + .with( + fmt::layer() + .with_writer(non_blocking) + .with_ansi(false) + .json(), + ) + .with(fmt::layer().with_writer(std::io::stdout).with_ansi(true)); + + subscriber.init(); + + tracing::info!("Logging initialized, log file: {}", log_file.display()); + + std::mem::forget(_guard); + + Ok(()) +} diff --git a/src/main.rs b/src/main.rs new file mode 100644 index 00000000..1454f132 --- /dev/null +++ b/src/main.rs @@ -0,0 +1,222 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +mod config; +mod coordinator; +mod logging; +mod runtime; +mod server; +mod sql; +mod storage; + +use anyhow::{Context, Result}; +use std::thread; +use tokio::sync::oneshot; + +pub struct ServerHandle { + join_handle: Option>, + shutdown_tx: Option>, + error_rx: oneshot::Receiver, +} + +impl ServerHandle { + pub fn stop(mut self) { + log::info!("Initiating server shutdown sequence..."); + + if let Some(tx) = self.shutdown_tx.take() { + if tx.send(()).is_err() { + log::warn!("Server shutdown signal failed to send (receiver dropped)"); + } + } + + if let Some(handle) = self.join_handle.take() { + log::info!("Waiting for server thread to finalize..."); + if let Err(e) = handle.join() { + log::error!("Failed to join server thread: {:?}", e); + } + } + + log::info!("Server shutdown completed."); + } + + pub async fn wait_for_error(&mut self) -> Result<()> { + if let Ok(err) = (&mut self.error_rx).await { + return Err(err); + } + Ok(()) + } +} + +async fn wait_for_signal() -> Result { + #[cfg(unix)] + { + use tokio::signal::unix::{SignalKind, signal}; + let mut sigterm = signal(SignalKind::terminate()).context("Failed to register SIGTERM")?; + let mut sigint = signal(SignalKind::interrupt()).context("Failed to register SIGINT")?; + let mut sighup = signal(SignalKind::hangup()).context("Failed to register SIGHUP")?; + + tokio::select! 
{ + _ = sigterm.recv() => Ok("SIGTERM".to_string()), + _ = sigint.recv() => Ok("SIGINT".to_string()), + _ = sighup.recv() => Ok("SIGHUP".to_string()), + } + } + #[cfg(not(unix))] + { + tokio::signal::ctrl_c() + .await + .context("Failed to listen for Ctrl+C")?; + Ok("Ctrl+C".to_string()) + } +} + +fn spawn_server_thread(config: config::GlobalConfig) -> Result { + let (shutdown_tx, shutdown_rx) = oneshot::channel(); + let (error_tx, error_rx) = oneshot::channel(); + + let cpu_count = num_cpus::get(); + let worker_threads = config.service.workers.unwrap_or_else(|| { + let multiplier = config.service.worker_multiplier.unwrap_or(4); + cpu_count * multiplier + }); + + log::info!( + "Spawning gRPC server thread (Workers: {}, Cores: {})", + worker_threads, + cpu_count + ); + + let handle = thread::Builder::new() + .name("grpc-runtime".to_string()) + .spawn(move || { + let rt = match tokio::runtime::Builder::new_multi_thread() + .worker_threads(worker_threads) + .thread_name("grpc-worker") + .enable_all() + .build() + { + Ok(rt) => rt, + Err(e) => { + let _ = error_tx.send(anyhow::anyhow!("Failed to build runtime: {}", e)); + return; + } + }; + + rt.block_on(async { + if let Err(e) = server::start_server_with_shutdown(&config, shutdown_rx, None).await + { + log::error!("Server runtime loop crashed: {}", e); + let _ = error_tx.send(e); + } + }); + }) + .context("Failed to spawn server thread")?; + + Ok(ServerHandle { + join_handle: Some(handle), + shutdown_tx: Some(shutdown_tx), + error_rx, + }) +} + +fn setup_environment() -> Result { + let data_dir = config::get_data_dir(); + let conf_dir = config::get_conf_dir(); + + let config = if let Some(path) = config::find_config_file("config.yaml") { + log::info!("Loading configuration from: {}", path.display()); + config::load_global_config(&path) + .map_err(|e| anyhow::anyhow!("{}", e)) + .context("Configuration load failed")? 
+ } else { + log::warn!("Configuration file not found, defaulting to built-in values."); + config::GlobalConfig::default() + }; + + logging::init_logging(&config.logging).context("Logging initialization failed")?; + + log::debug!( + "Environment initialized. Data: {}, Conf: {}", + data_dir.display(), + conf_dir.display() + ); + Ok(config) +} + +fn main() -> Result<()> { + // 1. Bootstrap + let config = match setup_environment() { + Ok(c) => c, + Err(e) => { + eprintln!("Bootstrap failure: {:#}", e); + std::process::exit(1); + } + }; + + config + .validate() + .map_err(|e| anyhow::anyhow!(e)) + .context("Configuration validation failed")?; + + proctitle::set_title(format!("function-stream-{}", config.service.service_id)); + log::info!( + "Starting Service [Name: {}, ID: {}] on {}:{}", + config.service.service_name, + config.service.service_id, + config.service.host, + config.service.port + ); + + // 2. Component Initialization + let registry = server::register_components(); + registry + .initialize_all(&config) + .context("Component initialization failed")?; + + // 3. Server Startup + let mut server_handle = spawn_server_thread(config.clone())?; + log::info!("Service is running and accepting requests."); + + // 4. Main Event Loop + let rt = tokio::runtime::Builder::new_current_thread() + .enable_all() + .build() + .context("Control plane runtime failed")?; + + let exit_result = rt.block_on(async { + tokio::select! { + // Case A: Server crashed internally + err = server_handle.wait_for_error() => { + log::error!("Server process exited unexpectedly."); + Err(err.unwrap_err()) + } + // Case B: System signal received + sig = wait_for_signal() => { + log::info!("Received signal: {}. shutting down...", sig.unwrap_or_default()); + Ok(()) + } + } + }); + + // 5. 
Teardown + match exit_result { + Ok(_) => { + server_handle.stop(); + log::info!("Service stopped gracefully."); + Ok(()) + } + Err(e) => { + log::error!("Service terminated with error: {:#}", e); + std::process::exit(1); + } + } +} diff --git a/src/runtime/buffer_and_event/buffer_or_event.rs b/src/runtime/buffer_and_event/buffer_or_event.rs new file mode 100644 index 00000000..50ae0701 --- /dev/null +++ b/src/runtime/buffer_and_event/buffer_or_event.rs @@ -0,0 +1,92 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#[derive(Debug)] +pub struct BufferOrEvent { + /// Buffer data (byte array, if is_buffer() returns true) + buffer: Option>, + /// Whether event has priority (can skip buffer) + has_priority: bool, + /// Channel/partition information (optional) + channel_info: Option, + /// Size (bytes) + size: usize, + /// Whether more data is available + more_available: bool, + /// Whether more priority events are available + more_priority_events: bool, +} + +impl BufferOrEvent { + /// Create BufferOrEvent of buffer type + pub fn new_buffer( + buffer: Vec, + channel_info: Option, + more_available: bool, + more_priority_events: bool, + ) -> Self { + let size = buffer.len(); + Self { + buffer: Some(buffer), + has_priority: false, + channel_info, + size, + more_available, + more_priority_events, + } + } + + /// Check if it's a buffer + pub fn is_buffer(&self) -> bool { + self.buffer.is_some() + } + + /// Check if event has priority + pub fn has_priority(&self) -> bool { + self.has_priority + } + + /// Get buffer data (if it's a buffer, returns reference to byte array) + pub fn get_buffer(&self) -> Option<&[u8]> { + self.buffer.as_deref() + } + + /// Get buffer data ownership (if it's a buffer) + pub fn into_buffer(self) -> Option> { + self.buffer + } + + /// Get channel/partition information + pub fn get_channel_info(&self) -> Option<&str> { + self.channel_info.as_deref() + } + + /// Get size (bytes) + pub fn get_size(&self) -> usize { + self.size + } + + /// Whether more data is available + pub fn more_available(&self) -> bool { + self.more_available + } + + /// Whether more priority events are available + pub fn more_priority_events(&self) -> bool { + self.more_priority_events + } + + /// Set whether more data is available + pub fn set_more_available(&mut self, more_available: bool) { + self.more_available = more_available; + } +} diff --git a/src/runtime/buffer_and_event/mod.rs b/src/runtime/buffer_and_event/mod.rs new file mode 100644 index 00000000..cca736f6 --- /dev/null +++ 
b/src/runtime/buffer_and_event/mod.rs @@ -0,0 +1,22 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// BufferAndEvent module - Buffer and event module +// +// Provides BufferOrEvent implementation, unified representation of data received from network or message queue +// Can be a buffer containing data records, or an event + +mod buffer_or_event; +pub mod stream_element; + +pub use buffer_or_event::BufferOrEvent; +// StreamRecord is now in the stream_element submodule, exported through stream_element diff --git a/src/runtime/buffer_and_event/stream_element/mod.rs b/src/runtime/buffer_and_event/stream_element/mod.rs new file mode 100644 index 00000000..5dd4cf1c --- /dev/null +++ b/src/runtime/buffer_and_event/stream_element/mod.rs @@ -0,0 +1,13 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +mod stream_element; diff --git a/src/runtime/buffer_and_event/stream_element/stream_element.rs b/src/runtime/buffer_and_event/stream_element/stream_element.rs new file mode 100644 index 00000000..7baefaf0 --- /dev/null +++ b/src/runtime/buffer_and_event/stream_element/stream_element.rs @@ -0,0 +1,26 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::fmt::Debug; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub enum StreamElementType { + Record, + Watermark, + LatencyMarker, + RecordAttributes, + WatermarkStatus, +} + +pub trait StreamElement: Send + Sync + Debug { + fn get_type(&self) -> StreamElementType; +} diff --git a/src/runtime/common/component_state.rs b/src/runtime/common/component_state.rs new file mode 100644 index 00000000..ea251054 --- /dev/null +++ b/src/runtime/common/component_state.rs @@ -0,0 +1,193 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Component State - Task component state machine +// +// Defines common state and control mechanisms for all task components (Input, Output, Processor, etc.) +// +// This is a pure state machine definition without any interface constraints +// Each component can choose how to use these states according to its own needs + +/// Control task channel capacity (maximum number of tasks in fixed-length channel) +/// Since control tasks (CheckPoint, Stop, Close) have low frequency, capacity doesn't need to be too large +pub const CONTROL_TASK_CHANNEL_CAPACITY: usize = 10; + +/// Task component state +/// +/// Represents the lifecycle state of task components (Input, Output, Processor, etc.) +/// +/// State transition diagram: +/// ```ignore +/// Uninitialized -> Initialized -> Starting -> Running +/// | +/// v +/// Checkpointing (checkpointing) +/// | +/// v +/// Stopping -> Stopped +/// | +/// v +/// Closing -> Closed +/// +/// Error (any state can transition to error) +/// ``` +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)] +pub enum ComponentState { + /// Uninitialized + #[default] + Uninitialized, + /// Initialized + Initialized, + /// Starting + Starting, + /// Running + Running, + /// Checkpointing + Checkpointing, + /// Stopping + Stopping, + /// Stopped + Stopped, + /// Closing + Closing, + /// Closed + Closed, + /// Error state + Error { + /// Error message + error: String, + }, +} + +impl ComponentState { + /// Check if state can accept new operations + pub fn can_accept_operations(&self) -> bool { + matches!( + self, + ComponentState::Initialized | ComponentState::Running | ComponentState::Stopped + ) + } + + /// Check if state is running + pub fn is_running(&self) -> bool { + matches!( + self, + ComponentState::Running | ComponentState::Checkpointing + ) + } + + /// Check if state is closed + pub fn is_closed(&self) -> bool { + matches!(self, ComponentState::Closed) + } + + /// Check if state 
is in error state + pub fn is_error(&self) -> bool { + matches!(self, ComponentState::Error { .. }) + } + + /// Check if can transition from current state to target state + pub fn can_transition_to(&self, target: &ComponentState) -> bool { + use ComponentState::*; + + match (self, target) { + // Can transition from Uninitialized to Initialized + (Uninitialized, Initialized) => true, + + // Can transition from Initialized to Starting + (Initialized, Starting) => true, + + // Can transition from Starting to Running + (Starting, Running) => true, + + // Can transition from Running to Checkpointing + (Running, Checkpointing) => true, + + // Can transition from Checkpointing back to Running + (Checkpointing, Running) => true, + + // Can transition from Running or Checkpointing to Stopping + (Running, Stopping) | (Checkpointing, Stopping) => true, + + // Can transition from Stopping to Stopped + (Stopping, Stopped) => true, + + // Can restart from Stopped + (Stopped, Starting) => true, + + // Can transition from Running, Checkpointing, or Stopped to Closing + (Running, Closing) | (Checkpointing, Closing) | (Stopped, Closing) => true, + + // Can transition from Closing to Closed + (Closing, Closed) => true, + + // Any state can transition to Error state + (_, Error { .. 
}) => true, + + // Other transitions are not allowed + _ => false, + } + } +} + +impl std::fmt::Display for ComponentState { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + ComponentState::Uninitialized => write!(f, "Uninitialized"), + ComponentState::Initialized => write!(f, "Initialized"), + ComponentState::Starting => write!(f, "Starting"), + ComponentState::Running => write!(f, "Running"), + ComponentState::Checkpointing => write!(f, "Checkpointing"), + ComponentState::Stopping => write!(f, "Stopping"), + ComponentState::Stopped => write!(f, "Stopped"), + ComponentState::Closing => write!(f, "Closing"), + ComponentState::Closed => write!(f, "Closed"), + ComponentState::Error { error } => write!(f, "Error({})", error), + } + } +} + +/// Control task type +/// +/// Used to pass various control tasks between component threads and main thread +/// All task components should support these control tasks +#[derive(Debug, Clone)] +pub enum ControlTask { + /// Checkpoint task + Checkpoint { + /// Checkpoint ID + checkpoint_id: u64, + /// Timestamp (optional) + timestamp: Option, + }, + /// Stop task + Stop { + /// Stop reason (optional) + reason: Option, + }, + /// Close task + Close { + /// Close reason (optional) + reason: Option, + }, + /// Error task + Error { + /// Error message + error: String, + /// Error type (optional) + error_type: Option, + /// Whether component should be stopped + should_stop: bool, + }, +} diff --git a/src/runtime/common/mod.rs b/src/runtime/common/mod.rs new file mode 100644 index 00000000..26993dd4 --- /dev/null +++ b/src/runtime/common/mod.rs @@ -0,0 +1,21 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Common runtime components module +// +// Provides common components and state definitions for runtime + +pub mod component_state; +pub mod task_completion; + +pub use component_state::*; +pub use task_completion::*; diff --git a/src/runtime/common/task_completion.rs b/src/runtime/common/task_completion.rs new file mode 100644 index 00000000..da6d19f7 --- /dev/null +++ b/src/runtime/common/task_completion.rs @@ -0,0 +1,283 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// TaskCompletionFlag - Task completion flag +// +// Used to track whether control tasks have completed processing, supports blocking wait and error message recording + +use std::sync::Arc; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::{Condvar, Mutex}; +use std::time::Duration; + +/// Default timeout (milliseconds) +pub const DEFAULT_COMPLETION_TIMEOUT_MS: u64 = 1000; + +/// Task completion result +#[derive(Debug, Clone)] +pub enum TaskResult { + /// Task completed successfully + Success, + /// Task failed + Error(String), +} + +impl TaskResult { + /// Check if successful + pub fn is_success(&self) -> bool { + matches!(self, TaskResult::Success) + } + + /// Check if failed + pub fn is_error(&self) -> bool { + matches!(self, TaskResult::Error(_)) + } + + /// Get error message + pub fn error_message(&self) -> Option<&str> { + match self { + TaskResult::Error(msg) => Some(msg), + TaskResult::Success => None, + } + } +} + +/// Task completion flag +/// +/// Used to track whether control tasks have completed processing +/// +/// Supports: +/// - Blocking wait (using Condvar, default timeout 1 second) +/// - Non-blocking check +/// - Completion notification (wake up all waiting threads) +/// - Error message recording +#[derive(Debug, Clone)] +pub struct TaskCompletionFlag { + /// Completion flag + completed: Arc, + /// Condition variable (for blocking wait notification) + condvar: Arc<(Mutex, Condvar)>, + /// Task result (success or error message) + result: Arc>>, +} + +impl TaskCompletionFlag { + /// Create new task completion flag + pub fn new() -> Self { + Self { + completed: Arc::new(AtomicBool::new(false)), + condvar: Arc::new((Mutex::new(false), Condvar::new())), + result: Arc::new(Mutex::new(None)), + } + } + + /// Mark task as successfully completed and notify all waiting threads + pub fn mark_completed(&self) { + self.complete_with_result(TaskResult::Success); + } + + /// Mark task as failed and notify all waiting threads + /// + /// # 
Arguments + /// - `error`: Error message + pub fn mark_error(&self, error: String) { + self.complete_with_result(TaskResult::Error(error)); + } + + /// Complete task with specified result + fn complete_with_result(&self, task_result: TaskResult) { + // Save result + { + let mut result = self.result.lock().unwrap(); + *result = Some(task_result); + } + + // Set completion flag + self.completed.store(true, Ordering::SeqCst); + + // Notify all waiting threads + let (lock, cvar) = &*self.condvar; + let mut completed = lock.lock().unwrap(); + *completed = true; + cvar.notify_all(); + } + + /// Check if task is completed (non-blocking) + pub fn is_completed(&self) -> bool { + self.completed.load(Ordering::SeqCst) + } + + /// Check if task completed successfully + pub fn is_success(&self) -> bool { + if let Ok(result) = self.result.lock() { + result.as_ref().map(|r| r.is_success()).unwrap_or(false) + } else { + false + } + } + + /// Check if task failed + pub fn is_error(&self) -> bool { + if let Ok(result) = self.result.lock() { + result.as_ref().map(|r| r.is_error()).unwrap_or(false) + } else { + false + } + } + + /// Get task result + pub fn get_result(&self) -> Option { + self.result.lock().ok().and_then(|r| r.clone()) + } + + /// Get error message + pub fn get_error(&self) -> Option { + self.result.lock().ok().and_then(|r| { + r.as_ref() + .and_then(|res| res.error_message().map(|s| s.to_string())) + }) + } + + /// Blocking wait for task completion (default timeout 1 second) + /// + /// # Returns + /// - `Ok(())`: Task completed successfully + /// - `Err(String)`: Task failed or timeout + pub fn wait(&self) -> Result<(), String> { + self.wait_timeout(Duration::from_millis(DEFAULT_COMPLETION_TIMEOUT_MS)) + } + + /// Blocking wait for task completion (specified timeout) + /// + /// # Arguments + /// - `timeout`: Timeout duration + /// + /// # Returns + /// - `Ok(())`: Task completed successfully + /// - `Err(String)`: Task failed or timeout + pub fn wait_timeout(&self, 
timeout: Duration) -> Result<(), String> { + // Quick check + if self.is_completed() { + return self.check_result(); + } + + let (lock, cvar) = &*self.condvar; + let completed = lock.lock().unwrap(); + + if *completed { + return self.check_result(); + } + + // Use condition variable to block wait for notification + let result = cvar.wait_timeout(completed, timeout).unwrap(); + + if *result.0 || self.is_completed() { + self.check_result() + } else { + Err("Task completion timeout".to_string()) + } + } + + /// Check task result + fn check_result(&self) -> Result<(), String> { + match self.get_result() { + Some(TaskResult::Success) => Ok(()), + Some(TaskResult::Error(e)) => Err(e), + None => Err("Task result not set".to_string()), + } + } + + /// Blocking wait for task completion (wait forever) + /// + /// # Returns + /// - `Ok(())`: Task completed successfully + /// - `Err(String)`: Task failed + pub fn wait_forever(&self) -> Result<(), String> { + if self.is_completed() { + return self.check_result(); + } + + let (lock, cvar) = &*self.condvar; + let mut completed = lock.lock().unwrap(); + + while !*completed && !self.is_completed() { + completed = cvar.wait(completed).unwrap(); + } + + self.check_result() + } +} + +impl Default for TaskCompletionFlag { + fn default() -> Self { + Self::new() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::thread; + + #[test] + fn test_task_completion_success() { + let flag = TaskCompletionFlag::new(); + + assert!(!flag.is_completed()); + assert!(!flag.is_success()); + + flag.mark_completed(); + + assert!(flag.is_completed()); + assert!(flag.is_success()); + assert!(!flag.is_error()); + assert!(flag.wait().is_ok()); + } + + #[test] + fn test_task_completion_error() { + let flag = TaskCompletionFlag::new(); + + flag.mark_error("Test error".to_string()); + + assert!(flag.is_completed()); + assert!(flag.is_error()); + assert!(!flag.is_success()); + assert_eq!(flag.get_error(), Some("Test error".to_string())); + 
assert!(flag.wait().is_err()); + } + + #[test] + fn test_task_completion_wait_timeout() { + let flag = TaskCompletionFlag::new(); + + let result = flag.wait_timeout(Duration::from_millis(10)); + assert!(result.is_err()); + assert_eq!(result.unwrap_err(), "Task completion timeout"); + } + + #[test] + fn test_task_completion_cross_thread() { + let flag = TaskCompletionFlag::new(); + let flag_clone = flag.clone(); + + let handle = thread::spawn(move || { + thread::sleep(Duration::from_millis(50)); + flag_clone.mark_completed(); + }); + + let result = flag.wait_timeout(Duration::from_secs(1)); + assert!(result.is_ok()); + + handle.join().unwrap(); + } +} diff --git a/src/runtime/input/input_source.rs b/src/runtime/input/input_source.rs new file mode 100644 index 00000000..edfb1f31 --- /dev/null +++ b/src/runtime/input/input_source.rs @@ -0,0 +1,111 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// InputSource - Input source interface +// +// Defines the standard interface for input sources, including lifecycle management and data retrieval +// State is uniformly managed by the runloop thread + +use crate::runtime::buffer_and_event::BufferOrEvent; +use crate::runtime::taskexecutor::InitContext; + +// Re-export common component state for compatibility +pub use crate::runtime::common::ComponentState as InputSourceState; + +/// InputSource - Input source interface +/// +/// Defines the standard interface for input sources, including: +/// - Lifecycle management (init, start, stop, close) +/// - Data retrieval (get_next, poll_next) +/// - Checkpoint support (take_checkpoint, finish_checkpoint) +/// +/// State is uniformly managed by the runloop thread, callers don't need to directly manipulate state +pub trait InputSource: Send + Sync { + fn init_with_context( + &mut self, + init_context: &InitContext, + ) -> Result<(), Box>; + + /// Start input source + /// + /// Start reading data from input source + /// State is set to Running by the runloop thread + fn start(&mut self) -> Result<(), Box>; + + /// Stop input source + /// + /// Stop reading data from input source, but keep resources available + /// State is set to Stopped by the runloop thread + fn stop(&mut self) -> Result<(), Box>; + + /// Close input source + /// + /// Release all resources, the input source will no longer be usable + /// State is set to Closed by the runloop thread + fn close(&mut self) -> Result<(), Box>; + + /// Get next data + /// + /// Get next BufferOrEvent from input source + /// Returns None to indicate no data is currently available (non-blocking) + /// + /// # Returns + /// - `Ok(Some(BufferOrEvent))`: Data retrieved + /// - `Ok(None)`: No data currently available + /// - `Err(...)`: Error occurred + fn get_next(&mut self) -> Result, Box>; + + /// Poll for next data (non-blocking) + /// + /// Poll for next BufferOrEvent from input source without blocking current thread + /// 
If no data is currently available, returns None immediately + /// + /// # Returns + /// - `Ok(Some(BufferOrEvent))`: Data retrieved + /// - `Ok(None)`: No data currently available (non-blocking return) + /// - `Err(...)`: Error occurred + fn poll_next(&mut self) -> Result, Box> { + self.get_next() + } + + /// Start checkpoint + /// + /// Start saving current input source state for failure recovery + /// State is set to Checkpointing by the runloop thread + /// + /// # Arguments + /// - `checkpoint_id`: Checkpoint ID + fn take_checkpoint( + &mut self, + checkpoint_id: u64, + ) -> Result<(), Box>; + + /// Finish checkpoint + /// + /// Notify input source that checkpoint is complete + /// State is set back to Running by the runloop thread + /// + /// # Arguments + /// - `checkpoint_id`: Checkpoint ID + fn finish_checkpoint( + &mut self, + checkpoint_id: u64, + ) -> Result<(), Box>; + + /// Get input group ID + /// + /// Returns the input group index this input source belongs to (0-based) + /// + /// # Returns + /// - `usize`: Input group index + fn get_group_id(&self) -> usize; +} diff --git a/src/runtime/input/input_source_provider.rs b/src/runtime/input/input_source_provider.rs new file mode 100644 index 00000000..4804d4dc --- /dev/null +++ b/src/runtime/input/input_source_provider.rs @@ -0,0 +1,141 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// InputSourceProvider - Input source provider +// +// Creates InputSource instances from configuration objects + +use crate::runtime::input::InputSource; +use crate::runtime::task::InputConfig; + +/// InputSourceProvider - Input source provider +/// +/// Creates InputSource instances from configuration objects +pub struct InputSourceProvider; + +impl InputSourceProvider { + /// Create multiple InputSource from InputConfig list + /// + /// # Arguments + /// - `input_configs`: InputConfig list + /// - `group_idx`: Input group index (used to identify which group the input source belongs to) + /// + /// # Returns + /// - `Ok(Vec>)`: Successfully created input source list + /// - `Err(...)`: Configuration parsing or creation failed + pub fn from_input_configs( + input_configs: &[InputConfig], + group_idx: usize, + ) -> Result>, Box> { + if input_configs.is_empty() { + return Err(Box::new(std::io::Error::new( + std::io::ErrorKind::InvalidData, + format!( + "Empty input configs list for input group #{}", + group_idx + 1 + ), + )) as Box); + } + + // Check input source count limit (maximum 64) + const MAX_INPUTS: usize = 64; + if input_configs.len() > MAX_INPUTS { + return Err(Box::new(std::io::Error::new( + std::io::ErrorKind::InvalidData, + format!( + "Too many inputs in group #{}: {} (maximum is {})", + group_idx + 1, + input_configs.len(), + MAX_INPUTS + ), + )) as Box); + } + + // Create InputSource for each InputConfig + let mut inputs = Vec::new(); + for (input_idx, input_config) in input_configs.iter().enumerate() { + let input = Self::from_input_config(input_config, group_idx, input_idx)?; + inputs.push(input); + } + + Ok(inputs) + } + + /// Create InputSource from single InputConfig + /// + /// # Arguments + /// - `input_config`: Input source configuration + /// - `group_idx`: Input group index (used to identify which group the input source belongs to) + /// - `input_idx`: Input source index within group (used to identify different input sources within the 
same group) + /// + /// # Returns + /// - `Ok(Box)`: Successfully created input source + /// - `Err(...)`: Parsing failed + fn from_input_config( + input_config: &InputConfig, + group_idx: usize, + input_idx: usize, + ) -> Result, Box> { + match input_config { + InputConfig::Kafka { + bootstrap_servers, + topic, + partition, + group_id, + extra, + } => { + use crate::runtime::input::protocol::kafka::{KafkaConfig, KafkaInputSource}; + + // Convert bootstrap_servers string to Vec + let servers: Vec = bootstrap_servers + .split(',') + .map(|s| s.trim().to_string()) + .filter(|s| !s.is_empty()) + .collect(); + + if servers.is_empty() { + return Err(Box::new(std::io::Error::new( + std::io::ErrorKind::InvalidData, + format!( + "Invalid bootstrap_servers in input config (group #{}): empty or invalid (topic: {}, group_id: {})", + group_idx + 1, + topic, + group_id + ), + )) as Box); + } + + // Convert partition from Option to Option + let partition_i32 = partition.map(|p| p as i32); + + // Merge extra configuration into properties + let properties = extra.clone(); + + // Create KafkaConfig + let kafka_config = KafkaConfig::new( + servers, + topic.clone(), + partition_i32, + group_id.clone(), + properties, + ); + + // Create KafkaInputSource, pass in group_idx and input_idx + Ok(Box::new(KafkaInputSource::from_config( + kafka_config, + group_idx, + input_idx, + ))) + } + } + } +} diff --git a/src/runtime/input/mod.rs b/src/runtime/input/mod.rs new file mode 100644 index 00000000..943136ed --- /dev/null +++ b/src/runtime/input/mod.rs @@ -0,0 +1,25 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Input module - Input module +// +// Provides input implementations for various data sources, including: +// - Input source interface +// - Input source provider (creates input sources from configuration) +// - Input source protocols (Kafka, etc.) + +mod input_source; +mod input_source_provider; +pub mod protocol; + +pub use input_source::{InputSource, InputSourceState}; +pub use input_source_provider::InputSourceProvider; diff --git a/src/runtime/input/protocol/kafka/config.rs b/src/runtime/input/protocol/kafka/config.rs new file mode 100644 index 00000000..a211dbe4 --- /dev/null +++ b/src/runtime/input/protocol/kafka/config.rs @@ -0,0 +1,76 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Kafka Config - Kafka configuration structure +// +// Defines configuration options for Kafka input source +// +// Note: Each input source only supports one topic and one partition + +use std::collections::HashMap; + +/// KafkaConfig - Kafka configuration +/// +/// Contains all configuration options for Kafka input source +#[derive(Debug, Clone)] +pub struct KafkaConfig { + /// Bootstrap servers (server addresses) + /// Can be a single string (comma-separated) or a list of strings + pub bootstrap_servers: Vec, + /// Topic name (single) + pub topic: String, + /// Partition ID (optional, uses subscribe auto-assignment if not specified) + pub partition: Option, + /// Consumer group ID + pub group_id: String, + /// Other configuration items (key-value pairs) + pub properties: HashMap, +} + +impl KafkaConfig { + /// Create new Kafka configuration + /// + /// # Arguments + /// - `bootstrap_servers`: List of Kafka broker addresses + /// - `topic`: Topic name (single) + /// - `partition`: Partition ID (optional) + /// - `group_id`: Consumer group ID + /// - `properties`: Other configuration items + pub fn new( + bootstrap_servers: Vec, + topic: String, + partition: Option, + group_id: String, + properties: HashMap, + ) -> Self { + Self { + bootstrap_servers, + topic, + partition, + group_id, + properties, + } + } + + /// Get bootstrap servers string (comma-separated) + pub fn bootstrap_servers_str(&self) -> String { + self.bootstrap_servers.join(",") + } + + /// Get partition display string + pub fn partition_str(&self) -> String { + match self.partition { + Some(p) => p.to_string(), + None => "auto".to_string(), + } + } +} diff --git a/src/runtime/input/protocol/kafka/input_source.rs b/src/runtime/input/protocol/kafka/input_source.rs new file mode 100644 index 00000000..6c3fa26c --- /dev/null +++ b/src/runtime/input/protocol/kafka/input_source.rs @@ -0,0 +1,803 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except 
in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// KafkaInputSource - Kafka input source implementation +// +// Implements InputSource that reads data from Kafka message queue +// Uses rdkafka client library for actual Kafka consumption +// Has an internal Kafka thread continuously consuming and putting messages into a fixed-length channel +// State changes are uniformly handled by the runloop thread (except init) + +use super::config::KafkaConfig; +use crate::runtime::buffer_and_event::BufferOrEvent; +use crate::runtime::common::TaskCompletionFlag; +use crate::runtime::input::{InputSource, InputSourceState}; +use crossbeam_channel::{Receiver, Sender, bounded}; +use rdkafka::Message; +use rdkafka::TopicPartitionList; +use rdkafka::config::ClientConfig; +use rdkafka::consumer::{BaseConsumer, Consumer}; +use std::sync::{Arc, Mutex}; +use std::thread; +use std::time::Duration; + +// ==================== Constants ==================== + +/// Default channel capacity (maximum number of messages in fixed-length channel) +const DEFAULT_CHANNEL_CAPACITY: usize = 1000; + +/// Maximum number of messages for single batch consumption (to avoid continuous consumption preventing control signals from being processed) +const MAX_BATCH_CONSUME_SIZE: usize = 50; + +/// Control operation timeout (milliseconds) +const CONTROL_OPERATION_TIMEOUT_MS: u64 = 5000; + +/// Maximum retry count for control operations +const CONTROL_OPERATION_MAX_RETRIES: u32 = 3; + +// ==================== Enum Definitions ==================== + +/// Input source control signal (control layer) +/// +/// 
Each signal contains a `completion_flag` to track whether the task has completed +#[derive(Debug, Clone)] +enum SourceControlSignal { + /// Start signal + Start { completion_flag: TaskCompletionFlag }, + /// Stop signal + Stop { completion_flag: TaskCompletionFlag }, + /// Close signal + Close { completion_flag: TaskCompletionFlag }, + /// Checkpoint start signal + Checkpoint { + checkpoint_id: u64, + completion_flag: TaskCompletionFlag, + }, + /// Checkpoint finish signal + CheckpointFinish { + checkpoint_id: u64, + completion_flag: TaskCompletionFlag, + }, +} + +/// Control signal processing result +enum ControlAction { + /// Continue running (process data) + Continue, + /// Pause (stop processing data, block waiting for control signal) + Pause, + /// Exit thread + Exit, +} + +// ==================== Struct Definitions ==================== + +/// KafkaInputSource - Kafka input source +/// +/// Reads messages from Kafka topic and converts them to BufferOrEvent +/// +/// Architecture: +/// - Has an internal Kafka consumer thread continuously consuming messages +/// - Consumed messages are put into a fixed-length channel +/// - Processor consumes data from the channel +/// - State changes are uniformly handled by the runloop thread (except init) +/// +/// Note: Only cares about the byte array content of messages, does not parse internal structure of Kafka messages (topic, partition, offset, etc.) 
+pub struct KafkaInputSource { + /// Kafka configuration + config: KafkaConfig, + /// Input group ID (starting from 0) + group_id: usize, + /// Input source ID within group (starting from 0, used to identify different input sources within the same group) + input_id: usize, + /// Component state (shared, uniformly managed by runloop thread) + state: Arc>, + /// Message channel sender (used by runloop thread, sends wrapped BufferOrEvent) + data_sender: Option>, + /// Message channel receiver (Processor consumes BufferOrEvent from here) + data_receiver: Option>, + /// Control signal channel sender (used by main thread, sends control signals) + control_sender: Option>, + /// Control signal channel receiver (used by runloop thread, receives control signals) + control_receiver: Option>, + /// Kafka consumer thread handle + consumer_thread: Option>, +} + +impl KafkaInputSource { + /// Create new Kafka input source from configuration + /// + /// # Arguments + /// - `config`: Kafka configuration + /// - `group_id`: Input group ID (starting from 0) + /// - `input_id`: Input source ID within group (starting from 0, used to identify different input sources within the same group) + pub fn from_config(config: KafkaConfig, group_id: usize, input_id: usize) -> Self { + Self { + config, + group_id, + input_id, + state: Arc::new(Mutex::new(InputSourceState::Uninitialized)), + data_sender: None, + data_receiver: None, + control_sender: None, + control_receiver: None, + consumer_thread: None, + } + } + + // ==================== Timeout Retry Helper Functions ==================== + + /// Wait for control operation completion with timeout retry + /// + /// Waits for completion_flag to mark completion and checks operation result + fn wait_with_retry( + &self, + completion_flag: &TaskCompletionFlag, + operation_name: &str, + ) -> Result<(), Box> { + let timeout = Duration::from_millis(CONTROL_OPERATION_TIMEOUT_MS); + + for retry in 0..CONTROL_OPERATION_MAX_RETRIES { + match 
completion_flag.wait_timeout(timeout) { + Ok(_) => { + // Check operation result + if let Some(error) = completion_flag.get_error() { + return Err(Box::new(std::io::Error::other(format!( + "{} failed: {}", + operation_name, error + )))); + } + return Ok(()); + } + Err(_) => { + log::warn!( + "{} timeout (retry {}/{}), topic: {}", + operation_name, + retry + 1, + CONTROL_OPERATION_MAX_RETRIES, + self.config.topic + ); + } + } + } + + Err(Box::new(std::io::Error::new( + std::io::ErrorKind::TimedOut, + format!( + "{} failed after {} retries", + operation_name, CONTROL_OPERATION_MAX_RETRIES + ), + ))) + } + + // ==================== Consumer Thread Main Loop ==================== + + /// Consumer thread main loop + /// + /// State machine driven event loop: + /// - Running state: simultaneously waits for control signals and consumes Kafka messages + /// - Paused state: only blocks waiting for control signals + /// - All state changes are uniformly handled in this thread + fn consumer_thread_loop( + consumer: BaseConsumer, + data_sender: Sender, + control_receiver: Receiver, + state: Arc>, + config: KafkaConfig, + ) { + use crossbeam_channel::select; + + // Initial state is paused, waiting for Start signal + let mut is_running = false; + log::debug!( + "Consumer thread started (paused), waiting for start signal for topic: {} partition: {}", + config.topic, + config.partition_str() + ); + + loop { + if is_running { + // ========== Running state: simultaneously wait for control signals and consume Kafka messages ========== + select! 
{ + recv(control_receiver) -> result => { + match result { + Ok(signal) => { + match Self::handle_control_signal(signal, &state, &config) { + ControlAction::Continue => is_running = true, + ControlAction::Pause => { + is_running = false; + log::info!("Source paused for topic: {} partition: {}", config.topic, config.partition_str()); + } + ControlAction::Exit => break, + } + } + Err(_) => { + log::warn!("Control channel disconnected for topic: {} partition: {}", config.topic, config.partition_str()); + break; + } + } + } + default(Duration::from_millis(100)) => { + // Poll messages from Kafka + Self::poll_and_send_messages(&consumer, &data_sender, &config); + } + } + } else { + // ========== Paused state: only block waiting for control signals ========== + match control_receiver.recv() { + Ok(signal) => match Self::handle_control_signal(signal, &state, &config) { + ControlAction::Continue => is_running = true, + ControlAction::Pause => is_running = false, + ControlAction::Exit => break, + }, + Err(_) => { + log::warn!( + "Control channel disconnected for topic: {} partition: {}", + config.topic, + config.partition_str() + ); + break; + } + } + } + } + + // Don't commit offset, allow duplicate consumption + log::info!( + "Consumer thread exiting for topic: {} partition: {} (offset not committed)", + config.topic, + config.partition_str() + ); + } + + // ==================== Control Layer Functions ==================== + + /// Handle control signal (executed in runloop thread, uniformly manages state changes) + /// + /// Note: Does not commit offset, allows duplicate consumption + fn handle_control_signal( + signal: SourceControlSignal, + state: &Arc>, + config: &KafkaConfig, + ) -> ControlAction { + let current_state = state.lock().unwrap().clone(); + + match signal { + SourceControlSignal::Start { completion_flag } => { + // Can only Start in Initialized or Stopped state + if !matches!( + current_state, + InputSourceState::Initialized | InputSourceState::Stopped + ) 
{ + let error = format!("Cannot start in state: {:?}", current_state); + log::error!( + "{} for topic: {} partition: {}", + error, + config.topic, + config.partition_str() + ); + completion_flag.mark_error(error); + return ControlAction::Continue; + } + log::debug!( + "Source start signal received for topic: {} partition: {}", + config.topic, + config.partition_str() + ); + *state.lock().unwrap() = InputSourceState::Running; + completion_flag.mark_completed(); + ControlAction::Continue + } + SourceControlSignal::Stop { completion_flag } => { + // Can only Stop in Running or Checkpointing state + if !matches!( + current_state, + InputSourceState::Running | InputSourceState::Checkpointing + ) { + // Stop operation silently succeeds if state is wrong (idempotent) + log::debug!( + "Stop ignored in state: {:?} for topic: {} partition: {}", + current_state, + config.topic, + config.partition_str() + ); + completion_flag.mark_completed(); + return ControlAction::Pause; + } + log::info!( + "Source stop signal received for topic: {} partition: {}", + config.topic, + config.partition_str() + ); + *state.lock().unwrap() = InputSourceState::Stopped; + completion_flag.mark_completed(); + ControlAction::Pause + } + SourceControlSignal::Close { completion_flag } => { + // Close can be executed in any state + log::info!( + "Source close signal received for topic: {} partition: {}", + config.topic, + config.partition_str() + ); + *state.lock().unwrap() = InputSourceState::Closing; + *state.lock().unwrap() = InputSourceState::Closed; + completion_flag.mark_completed(); + ControlAction::Exit + } + SourceControlSignal::Checkpoint { + checkpoint_id, + completion_flag, + } => { + // Can only Checkpoint in Running state + if !matches!(current_state, InputSourceState::Running) { + let error = format!("Cannot take checkpoint in state: {:?}", current_state); + log::error!( + "{} for topic: {} partition: {}", + error, + config.topic, + config.partition_str() + ); + 
completion_flag.mark_error(error); + return ControlAction::Continue; + } + log::info!( + "Checkpoint {} started for topic: {} partition: {}", + checkpoint_id, + config.topic, + config.partition_str() + ); + *state.lock().unwrap() = InputSourceState::Checkpointing; + log::info!( + "Checkpoint {}: Skipped offset commit (allow duplicate consumption)", + checkpoint_id + ); + completion_flag.mark_completed(); + ControlAction::Continue + } + SourceControlSignal::CheckpointFinish { + checkpoint_id, + completion_flag, + } => { + // Can only CheckpointFinish in Checkpointing state + if !matches!(current_state, InputSourceState::Checkpointing) { + let error = format!("Cannot finish checkpoint in state: {:?}", current_state); + log::error!( + "{} for topic: {} partition: {}", + error, + config.topic, + config.partition_str() + ); + completion_flag.mark_error(error); + return ControlAction::Continue; + } + log::info!( + "Checkpoint {} finish for topic: {} partition: {}", + checkpoint_id, + config.topic, + config.partition_str() + ); + *state.lock().unwrap() = InputSourceState::Running; + completion_flag.mark_completed(); + ControlAction::Continue + } + } + } + + /// Poll messages from Kafka and send to channel + /// + /// Runloop thread is responsible for constructing BufferOrEvent, main thread consumes directly + /// Note: Does not commit offset, allows duplicate consumption + fn poll_and_send_messages( + consumer: &BaseConsumer, + data_sender: &Sender, + config: &KafkaConfig, + ) { + // Batch consumption, limit quantity + let mut batch_count = 0; + + while batch_count < MAX_BATCH_CONSUME_SIZE { + // If queue is full before consumption, exit this batch directly + if data_sender.is_full() { + break; + } + match consumer.poll(Duration::from_millis(10)) { + None => break, // No more messages + Some(Ok(message)) => { + if let Some(payload) = message.payload() { + let bytes = payload.to_vec(); + let channel_info = Some(config.topic.clone()); + // Construct BufferOrEvent in runloop 
thread + let buffer_or_event = BufferOrEvent::new_buffer( + bytes, + channel_info, + false, // more_available cannot be determined when sending, left for consumer to judge + false, // is_broadcast + ); + + match data_sender.try_send(buffer_or_event) { + Ok(_) => { + // Don't commit offset, allow duplicate consumption + batch_count += 1; + // Immediately check if queue is full after putting + if data_sender.is_full() { + break; + } + } + Err(crossbeam_channel::TrySendError::Full(_)) => { + // Channel full, process next time + break; + } + Err(crossbeam_channel::TrySendError::Disconnected(_)) => { + // Channel disconnected + break; + } + } + } + // Don't commit offset when there's no payload either + } + Some(Err(e)) => { + log::error!( + "Kafka poll error for topic {} partition {}: {}", + config.topic, + config.partition_str(), + e + ); + break; + } + } + } + } + + // ==================== Configuration Validation ==================== + + fn validate_kafka_config(&self) -> Result<(), Box> { + if self.config.bootstrap_servers.is_empty() { + return Err(Box::new(std::io::Error::new( + std::io::ErrorKind::InvalidInput, + "Kafka bootstrap_servers is required", + ))); + } + + if self.config.group_id.trim().is_empty() { + return Err(Box::new(std::io::Error::new( + std::io::ErrorKind::InvalidInput, + "Kafka group_id is required", + ))); + } + + if self.config.topic.trim().is_empty() { + return Err(Box::new(std::io::Error::new( + std::io::ErrorKind::InvalidInput, + "Kafka topic is required", + ))); + } + + if let Some(partition) = self.config.partition + && partition < 0 + { + return Err(Box::new(std::io::Error::new( + std::io::ErrorKind::InvalidInput, + format!("Kafka partition must be >= 0, got: {}", partition), + ))); + } + + // Validate enable.auto.commit must be false + if let Some(auto_commit) = self.config.properties.get("enable.auto.commit") + && auto_commit.to_lowercase().trim() == "true" + { + return Err(Box::new(std::io::Error::new( + 
std::io::ErrorKind::InvalidInput, + "enable.auto.commit must be false for manual offset commit", + ))); + } + + Ok(()) + } + + fn create_consumer(&self) -> Result> { + self.validate_kafka_config()?; + + let mut client_config = ClientConfig::new(); + + client_config.set("bootstrap.servers", self.config.bootstrap_servers_str()); + client_config.set("group.id", &self.config.group_id); + client_config.set("enable.partition.eof", "false"); + client_config.set("enable.auto.commit", "false"); + + for (key, value) in &self.config.properties { + if key != "enable.auto.commit" { + client_config.set(key, value); + } + } + + let consumer: BaseConsumer = client_config.create().map_err(|e| { + Box::new(std::io::Error::other(format!( + "Failed to create Kafka consumer: {}", + e + ))) as Box + })?; + + // Subscribe to topic or assign specific partition + if let Some(partition) = self.config.partition { + // Partition specified, use assign + let mut tpl = TopicPartitionList::new(); + tpl.add_partition(&self.config.topic, partition); + consumer.assign(&tpl).map_err(|e| { + Box::new(std::io::Error::other(format!( + "Failed to assign partition {}: {}", + partition, e + ))) as Box + })?; + } else { + // Partition not specified, use subscribe auto-assignment + consumer.subscribe(&[&self.config.topic]).map_err(|e| { + Box::new(std::io::Error::other(format!( + "Failed to subscribe to topic '{}': {}", + self.config.topic, e + ))) as Box + })?; + } + + Ok(consumer) + } +} + +// ==================== InputSource Trait Implementation ==================== + +impl InputSource for KafkaInputSource { + fn init_with_context( + &mut self, + init_context: &crate::runtime::taskexecutor::InitContext, + ) -> Result<(), Box> { + // init_with_context is the only method that sets state in the caller thread (because runloop thread hasn't started yet) + if !matches!(*self.state.lock().unwrap(), InputSourceState::Uninitialized) { + return Ok(()); + } + + self.validate_kafka_config()?; + + // Create Channel + 
let (data_sender, data_receiver) = bounded(DEFAULT_CHANNEL_CAPACITY); + let (control_sender, control_receiver) = bounded(10); + + // Save both ends of channel to struct + // data_sender can be cloned, so it can be saved and used simultaneously + // control_receiver cannot be cloned, needs to be taken from struct and moved to thread + self.data_sender = Some(data_sender.clone()); + self.data_receiver = Some(data_receiver); + self.control_sender = Some(control_sender); + self.control_receiver = Some(control_receiver); + + // Create Kafka consumer and start thread + let consumer = self.create_consumer()?; + let config_clone = self.config.clone(); + let state_clone = self.state.clone(); + + // Take control_receiver from struct for thread use + let control_receiver_for_thread = self.control_receiver.take().ok_or_else(|| { + Box::new(std::io::Error::other("control_receiver is None")) + as Box + })?; + + let thread_name = format!( + "kafka-source-g{}-i{}-{}-{}", + self.group_id, + self.input_id, + self.config.topic, + self.config.partition_str() + ); + let thread_handle = thread::Builder::new() + .name(thread_name.clone()) + .spawn(move || { + Self::consumer_thread_loop( + consumer, + data_sender, + control_receiver_for_thread, + state_clone, + config_clone, + ); + }) + .map_err(|e| { + Box::new(std::io::Error::other(format!( + "Failed to start thread: {}", + e + ))) as Box + })?; + + // Register thread group to InitContext + use crate::runtime::processor::wasm::thread_pool::{ThreadGroup, ThreadGroupType}; + let mut input_thread_group = ThreadGroup::new( + ThreadGroupType::InputSource(self.group_id), + format!("InputSource-g{}-i{}", self.group_id, self.input_id), + ); + input_thread_group.add_thread(thread_handle); + init_context.register_thread_group(input_thread_group); + + // Note: Thread handle has been moved to thread group, no longer stored in consumer_thread + // When closing, need to manage thread through TaskHandle + self.consumer_thread = None; + 
*self.state.lock().unwrap() = InputSourceState::Initialized; + Ok(()) + } + + fn start(&mut self) -> Result<(), Box> { + // Don't check state in main thread, handled by runloop thread's handle_control_signal + let completion_flag = TaskCompletionFlag::new(); + if let Some(ref control_sender) = self.control_sender { + control_sender + .send(SourceControlSignal::Start { + completion_flag: completion_flag.clone(), + }) + .map_err(|e| { + Box::new(std::io::Error::other(format!( + "Failed to send start signal: {}", + e + ))) as Box + })?; + } + + self.wait_with_retry(&completion_flag, "Start")?; + + log::debug!( + "KafkaInputSource started: group_id={}, input_id={}, topic={}, partition={}", + self.group_id, + self.input_id, + self.config.topic, + self.config.partition_str() + ); + Ok(()) + } + + fn stop(&mut self) -> Result<(), Box> { + // Don't check state in main thread, handled by runloop thread's handle_control_signal + let completion_flag = TaskCompletionFlag::new(); + if let Some(ref control_sender) = self.control_sender { + control_sender + .send(SourceControlSignal::Stop { + completion_flag: completion_flag.clone(), + }) + .map_err(|e| { + Box::new(std::io::Error::other(format!( + "Failed to send stop signal: {}", + e + ))) as Box + })?; + } + + self.wait_with_retry(&completion_flag, "Stop")?; + + log::info!( + "KafkaInputSource stopped: group_id={}, input_id={}, topic={}, partition={}", + self.group_id, + self.input_id, + self.config.topic, + self.config.partition_str() + ); + Ok(()) + } + + fn close(&mut self) -> Result<(), Box> { + if matches!(*self.state.lock().unwrap(), InputSourceState::Closed) { + return Ok(()); + } + + let completion_flag = TaskCompletionFlag::new(); + if let Some(ref control_sender) = self.control_sender { + let signal = SourceControlSignal::Close { + completion_flag: completion_flag.clone(), + }; + if control_sender.send(signal).is_ok() { + let _ = self.wait_with_retry(&completion_flag, "Close"); + } + } + + // Note: Thread handle has 
been moved to thread group, uniformly managed by TaskHandle + // No need to join here, thread group will wait uniformly in TaskHandle + + // Clean up resources + self.data_sender.take(); + self.data_receiver.take(); + self.control_sender.take(); + self.control_receiver.take(); + + log::info!( + "KafkaInputSource closed: group_id={}, input_id={}, topic={}, partition={}", + self.group_id, + self.input_id, + self.config.topic, + self.config.partition_str() + ); + Ok(()) + } + + fn get_next(&mut self) -> Result, Box> { + // Directly get BufferOrEvent constructed by runloop thread from channel + if let Some(ref receiver) = self.data_receiver { + match receiver.try_recv() { + Ok(buffer_or_event) => Ok(Some(buffer_or_event)), + Err(crossbeam_channel::TryRecvError::Empty) => Ok(None), + Err(crossbeam_channel::TryRecvError::Disconnected) => Ok(None), + } + } else { + Ok(None) + } + } + + fn take_checkpoint( + &mut self, + checkpoint_id: u64, + ) -> Result<(), Box> { + // Don't check state in main thread, handled by runloop thread's handle_control_signal + let completion_flag = TaskCompletionFlag::new(); + if let Some(ref control_sender) = self.control_sender { + let signal = SourceControlSignal::Checkpoint { + checkpoint_id, + completion_flag: completion_flag.clone(), + }; + control_sender.send(signal).map_err(|e| { + Box::new(std::io::Error::other(format!( + "Checkpoint signal failed: {}", + e + ))) as Box + })?; + } + + self.wait_with_retry(&completion_flag, "Checkpoint")?; + + log::info!( + "Checkpoint {} started: group_id={}, input_id={}, topic={}, partition={}", + checkpoint_id, + self.group_id, + self.input_id, + self.config.topic, + self.config.partition_str() + ); + Ok(()) + } + + fn finish_checkpoint( + &mut self, + checkpoint_id: u64, + ) -> Result<(), Box> { + // Don't check state in main thread, handled by runloop thread's handle_control_signal + let completion_flag = TaskCompletionFlag::new(); + if let Some(ref control_sender) = self.control_sender { + let 
signal = SourceControlSignal::CheckpointFinish { + checkpoint_id, + completion_flag: completion_flag.clone(), + }; + control_sender.send(signal).map_err(|e| { + Box::new(std::io::Error::other(format!( + "Failed to send checkpoint finish signal: {}", + e + ))) as Box + })?; + } + + self.wait_with_retry(&completion_flag, "CheckpointFinish")?; + + log::info!( + "Checkpoint {} finished: group_id={}, input_id={}, topic={}, partition={}", + checkpoint_id, + self.group_id, + self.input_id, + self.config.topic, + self.config.partition_str() + ); + Ok(()) + } + + fn get_group_id(&self) -> usize { + self.group_id + } +} diff --git a/src/runtime/input/protocol/kafka/mod.rs b/src/runtime/input/protocol/kafka/mod.rs new file mode 100644 index 00000000..e982aab4 --- /dev/null +++ b/src/runtime/input/protocol/kafka/mod.rs @@ -0,0 +1,19 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Kafka Protocol + +pub mod config; +pub mod input_source; + +pub use config::*; +pub use input_source::*; diff --git a/src/runtime/input/protocol/mod.rs b/src/runtime/input/protocol/mod.rs new file mode 100644 index 00000000..b9574391 --- /dev/null +++ b/src/runtime/input/protocol/mod.rs @@ -0,0 +1,13 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod kafka; diff --git a/src/runtime/mod.rs b/src/runtime/mod.rs new file mode 100644 index 00000000..f69ad017 --- /dev/null +++ b/src/runtime/mod.rs @@ -0,0 +1,23 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Runtime module + +pub mod buffer_and_event; +pub mod common; +pub mod input; +pub mod output; +pub mod processor; +pub mod sink; +pub mod source; +pub mod task; +pub mod taskexecutor; diff --git a/src/runtime/output/mod.rs b/src/runtime/output/mod.rs new file mode 100644 index 00000000..d2f46906 --- /dev/null +++ b/src/runtime/output/mod.rs @@ -0,0 +1,25 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Output module - Output module +// +// Provides output implementations, including: +// - Output sink interface +// - Output sink provider (creates output sinks from configuration) +// - Output protocols (Kafka, etc.) + +mod output_sink; +mod output_sink_provider; +mod protocol; + +pub use output_sink::OutputSink; +pub use output_sink_provider::OutputSinkProvider; diff --git a/src/runtime/output/output_sink.rs b/src/runtime/output/output_sink.rs new file mode 100644 index 00000000..07f789d5 --- /dev/null +++ b/src/runtime/output/output_sink.rs @@ -0,0 +1,130 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// OutputSink - Output sink interface +// +// Output sink interface supporting lifecycle management and data sending + +use crate::runtime::buffer_and_event::BufferOrEvent; +use crate::runtime::taskexecutor::InitContext; + +/// OutputSink - Output sink interface +/// +/// Supports complete lifecycle management and data output functionality +pub trait OutputSink: Send + Sync { + /// Initialize output sink with initialization context + /// + /// Called before use to perform necessary initialization work + /// + /// # Arguments + /// - `init_context`: Initialization context containing state storage, task storage and other resources + fn init_with_context( + &mut self, + init_context: &InitContext, + ) -> Result<(), Box>; + + /// Start output sink + /// + /// Start sending data to external systems + fn start(&mut self) -> Result<(), Box>; + + /// Stop output sink + /// + /// Stop sending data, but keep resources available + fn stop(&mut self) -> Result<(), Box>; + + /// Close output sink + /// + /// Release all resources, the sink will no longer be usable + fn close(&mut self) -> Result<(), Box>; + + /// Collect data + /// + /// Collect BufferOrEvent into output sink + /// + /// # Arguments + /// - `data`: Data to collect + /// + /// # Returns + /// - `Ok(())`: Collection successful + /// - `Err(...)`: Collection failed + fn collect(&mut self, data: BufferOrEvent) -> Result<(), Box>; + + /// Restore state + /// + /// Restore output sink state from checkpoint + /// + /// # Arguments + /// - `checkpoint_id`: Checkpoint ID + /// + /// # Returns + /// - `Ok(())`: Restore successful + /// - `Err(...)`: Restore failed + fn restore_state( + &mut self, + _checkpoint_id: u64, + ) -> Result<(), Box> { + // Default implementation: state restoration not supported + Ok(()) + } + + /// Start checkpoint + /// + /// Start saving current output sink state for failure recovery + /// State transition: Running -> Checkpointing + /// + /// # Arguments + /// - `checkpoint_id`: Checkpoint 
ID + /// + /// # Returns + /// - `Ok(())`: Checkpoint start successful + /// - `Err(...)`: Checkpoint start failed + fn take_checkpoint( + &mut self, + checkpoint_id: u64, + ) -> Result<(), Box>; + + /// Finish checkpoint + /// + /// Notify output sink that checkpoint is complete + /// State transition: Checkpointing -> Running + /// + /// # Arguments + /// - `checkpoint_id`: Checkpoint ID + /// + /// # Returns + /// - `Ok(())`: Checkpoint finish successful + /// - `Err(...)`: Checkpoint finish failed + fn finish_checkpoint( + &mut self, + checkpoint_id: u64, + ) -> Result<(), Box>; + + /// Flush buffered data + /// + /// Ensure all buffered data is sent out + /// + /// # Returns + /// - `Ok(())`: Flush successful + /// - `Err(...)`: Flush failed + fn flush(&mut self) -> Result<(), Box> { + Ok(()) // Default implementation: no flush needed + } + + /// Clone OutputSink (returns Box) + /// + /// Used to create a clone of OutputSink + /// + /// # Returns + /// - `Box`: Cloned OutputSink instance + fn box_clone(&self) -> Box; +} diff --git a/src/runtime/output/output_sink_provider.rs b/src/runtime/output/output_sink_provider.rs new file mode 100644 index 00000000..32f79b8c --- /dev/null +++ b/src/runtime/output/output_sink_provider.rs @@ -0,0 +1,126 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// OutputSinkProvider - Output sink provider +// +// Creates OutputSink instances from configuration objects + +use crate::runtime::output::OutputSink; +use crate::runtime::task::OutputConfig; + +/// OutputSinkProvider - Output sink provider +/// +/// Creates OutputSink instances from configuration objects +pub struct OutputSinkProvider; + +impl OutputSinkProvider { + /// Create multiple OutputSink from OutputConfig list + /// + /// # Arguments + /// - `output_configs`: OutputConfig list + /// + /// # Returns + /// - `Ok(Vec>)`: Successfully created output sink list + /// - `Err(...)`: Configuration parsing or creation failed + pub fn from_output_configs( + output_configs: &[OutputConfig], + ) -> Result>, Box> { + if output_configs.is_empty() { + return Err(Box::new(std::io::Error::new( + std::io::ErrorKind::InvalidData, + "Empty output configs list", + )) as Box); + } + + // Check output sink count limit (maximum 64) + const MAX_OUTPUTS: usize = 64; + if output_configs.len() > MAX_OUTPUTS { + return Err(Box::new(std::io::Error::new( + std::io::ErrorKind::InvalidData, + format!( + "Too many outputs: {} (maximum is {})", + output_configs.len(), + MAX_OUTPUTS + ), + )) as Box); + } + + // Create OutputSink for each OutputConfig + let mut outputs = Vec::new(); + for (sink_idx, output_config) in output_configs.iter().enumerate() { + let output = Self::from_output_config(output_config, sink_idx)?; + outputs.push(output); + } + + Ok(outputs) + } + + /// Create OutputSink from a single OutputConfig + /// + /// # Arguments + /// - `output_config`: Output configuration + /// - `sink_idx`: Output sink index (used to identify different output sinks) + /// + /// # Returns + /// - `Ok(Box)`: Successfully created output sink + /// - `Err(...)`: Parsing failed + fn from_output_config( + output_config: &OutputConfig, + sink_idx: usize, + ) -> Result, Box> { + match output_config { + OutputConfig::Kafka { + bootstrap_servers, + topic, + partition, + extra, + } => { + use 
crate::runtime::output::protocol::kafka::{ + KafkaOutputSink, KafkaProducerConfig, + }; + + // Convert bootstrap_servers string to Vec + let servers: Vec = bootstrap_servers + .split(',') + .map(|s| s.trim().to_string()) + .filter(|s| !s.is_empty()) + .collect(); + + if servers.is_empty() { + return Err(Box::new(std::io::Error::new( + std::io::ErrorKind::InvalidData, + format!( + "Invalid bootstrap_servers in output config: empty or invalid (topic: {})", + topic + ), + )) as Box); + } + + // Convert partition from u32 to Option + let partition_opt = Some(*partition as i32); + + // Merge extra configuration into properties + let properties = extra.clone(); + + // Create KafkaProducerConfig + let kafka_config = + KafkaProducerConfig::new(servers, topic.clone(), partition_opt, properties); + + // Create KafkaOutputSink, passing sink_idx + Ok(Box::new(KafkaOutputSink::from_config( + kafka_config, + sink_idx, + ))) + } + } + } +} diff --git a/src/runtime/output/protocol/kafka/mod.rs b/src/runtime/output/protocol/kafka/mod.rs new file mode 100644 index 00000000..cf5c082f --- /dev/null +++ b/src/runtime/output/protocol/kafka/mod.rs @@ -0,0 +1,21 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Kafka Protocol - Kafka protocol package +// +// Provides Kafka protocol-related output implementations + +pub mod output_sink; +pub mod producer_config; + +pub use output_sink::*; +pub use producer_config::*; diff --git a/src/runtime/output/protocol/kafka/output_sink.rs b/src/runtime/output/protocol/kafka/output_sink.rs new file mode 100644 index 00000000..034decbc --- /dev/null +++ b/src/runtime/output/protocol/kafka/output_sink.rs @@ -0,0 +1,830 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! KafkaOutputSink - Kafka output sink implementation +//! +//! Implements OutputSink for sending data to Kafka message queue. +//! Uses rdkafka client library for actual Kafka production. 
+ +use super::producer_config::KafkaProducerConfig; +use crate::runtime::buffer_and_event::BufferOrEvent; +use crate::runtime::common::{ComponentState, TaskCompletionFlag}; +use crate::runtime::output::OutputSink; +use rdkafka::producer::{BaseRecord, DefaultProducerContext, Producer, ThreadedProducer}; +use std::sync::Arc; +use std::sync::Mutex; + +// ==================== Constants ==================== + +/// Default channel capacity (maximum messages in bounded channel) +const DEFAULT_CHANNEL_CAPACITY: usize = 1000; + +/// Maximum batch size per consume (prevents control signals from being blocked) +const MAX_BATCH_CONSUME_SIZE: usize = 100; + +/// Default flush timeout (milliseconds) +const DEFAULT_FLUSH_TIMEOUT_MS: u64 = 5000; + +/// Control operation timeout (milliseconds) +const CONTROL_OPERATION_TIMEOUT_MS: u64 = 5000; + +/// Maximum retries for control operations +const CONTROL_OPERATION_MAX_RETRIES: u32 = 5; + +// ==================== Enum Definitions ==================== + +/// Sink control signal (control layer) +/// +/// Each signal includes a `completion_flag` to track task completion. 
+#[derive(Debug, Clone)] +enum SinkControlSignal { + /// Start signal + Start { completion_flag: TaskCompletionFlag }, + /// Stop signal + Stop { completion_flag: TaskCompletionFlag }, + /// Close signal + Close { completion_flag: TaskCompletionFlag }, + /// Begin checkpoint signal + Checkpoint { + checkpoint_id: u64, + completion_flag: TaskCompletionFlag, + }, + /// End checkpoint signal + CheckpointFinish { + checkpoint_id: u64, + completion_flag: TaskCompletionFlag, + }, + /// Flush signal + Flush { completion_flag: TaskCompletionFlag }, +} + +/// Control signal processing result +enum ControlAction { + /// Continue running (process data) + Continue, + /// Pause (stop processing data, block waiting for control signals) + Pause, + /// Exit thread + Exit, +} + +// ==================== Struct Definitions ==================== + +/// KafkaOutputSink - Kafka output sink +/// +/// Uses a dedicated thread for data sending, with internal data cache Channel. +/// Architecture: +/// - Main thread puts data into Channel +/// - Send thread consumes data from Channel and sends to Kafka +/// - Supports control signals (stop, close, checkpoint) +/// - State changes are managed uniformly by runloop thread (except init) +pub struct KafkaOutputSink { + /// Kafka configuration + config: KafkaProducerConfig, + /// Output sink ID (starting from 0, identifies different output sinks) + sink_id: usize, + /// Component state (shared, managed uniformly by runloop thread) + state: Arc>, + /// Data send thread + send_thread: Option>, + /// Data cache Channel sender (main thread writes) + data_sender: Option>, + /// Data cache Channel receiver (send thread reads) + data_receiver: Option>, + /// Control signal Channel sender + control_sender: Option>, + /// Control signal Channel receiver + control_receiver: Option>, +} + +impl KafkaOutputSink { + // ==================== Configuration/Construction ==================== + + /// Create a new Kafka output sink + /// + /// # Arguments + /// - 
`config`: Kafka configuration + /// - `sink_id`: Output sink ID (starting from 0, identifies different output sinks) + pub fn new(config: KafkaProducerConfig, sink_id: usize) -> Self { + Self { + config, + sink_id, + state: Arc::new(Mutex::new(ComponentState::Uninitialized)), + send_thread: None, + data_sender: None, + data_receiver: None, + control_sender: None, + control_receiver: None, + } + } + + /// Create from configuration + /// + /// # Arguments + /// - `config`: Kafka configuration + /// - `sink_id`: Output sink ID (starting from 0, identifies different output sinks) + pub fn from_config(config: KafkaProducerConfig, sink_id: usize) -> Self { + Self::new(config, sink_id) + } + + // ==================== Timeout Retry Helper Functions ==================== + + /// Wait for control operation completion with timeout and retry + /// + /// Waits for completion_flag to be marked complete and checks operation result. + fn wait_with_retry( + &self, + completion_flag: &TaskCompletionFlag, + operation_name: &str, + ) -> Result<(), Box> { + let timeout = std::time::Duration::from_millis(CONTROL_OPERATION_TIMEOUT_MS); + + for retry in 0..CONTROL_OPERATION_MAX_RETRIES { + match completion_flag.wait_timeout(timeout) { + Ok(_) => { + // Check operation result + if let Some(error) = completion_flag.get_error() { + return Err(Box::new(std::io::Error::other(format!( + "{} failed: {}", + operation_name, error + )))); + } + return Ok(()); + } + Err(_) => { + log::warn!( + "{} timeout (retry {}/{}), topic: {}", + operation_name, + retry + 1, + CONTROL_OPERATION_MAX_RETRIES, + self.config.topic + ); + } + } + } + + Err(Box::new(std::io::Error::new( + std::io::ErrorKind::TimedOut, + format!( + "{} failed after {} retries", + operation_name, CONTROL_OPERATION_MAX_RETRIES + ), + ))) + } + + /// Flush-specific timeout retry wait, checks operation result + fn wait_flush_with_retry( + &self, + completion_flag: &TaskCompletionFlag, + ) -> Result<(), Box> { + let timeout = 
std::time::Duration::from_millis(CONTROL_OPERATION_TIMEOUT_MS); + + for retry in 0..CONTROL_OPERATION_MAX_RETRIES { + match completion_flag.wait_timeout(timeout) { + Ok(_) => { + // Check operation result + if let Some(error) = completion_flag.get_error() { + return Err(Box::new(std::io::Error::other(format!( + "Flush failed: {}", + error + )))); + } + return Ok(()); + } + Err(_) => { + log::warn!( + "Flush timeout (retry {}/{}), topic: {}", + retry + 1, + CONTROL_OPERATION_MAX_RETRIES, + self.config.topic + ); + } + } + } + + Err(Box::new(std::io::Error::new( + std::io::ErrorKind::TimedOut, + format!( + "Flush failed after {} retries", + CONTROL_OPERATION_MAX_RETRIES + ), + ))) + } + + // ==================== Send Thread Main Loop ==================== + + /// Send thread main loop + /// + /// State machine driven event loop: + /// - Running state: waits for both control signals and data + /// - Paused state: blocks waiting only for control signals + /// - All state changes are handled uniformly in this thread + fn send_thread_loop( + producer: ThreadedProducer, + data_receiver: crossbeam_channel::Receiver, + control_receiver: crossbeam_channel::Receiver, + state: Arc>, + config: KafkaProducerConfig, + ) { + use crossbeam_channel::select; + + // Initial state is paused, waiting for Start signal + let mut is_running = false; + log::debug!( + "Send thread started (paused), waiting for start signal for topic: {}", + config.topic + ); + + loop { + if is_running { + // ========== Running state: wait for both control signals and data ========== + select! 
{ + recv(control_receiver) -> result => { + match result { + Ok(signal) => { + match Self::handle_control_signal(signal, &producer, &data_receiver, &state, &config) { + ControlAction::Continue => is_running = true, + ControlAction::Pause => { + is_running = false; + log::info!("Sink paused for topic: {}", config.topic); + } + ControlAction::Exit => break, + } + } + Err(_) => { + log::warn!("Control channel disconnected for topic: {}", config.topic); + break; + } + } + } + recv(data_receiver) -> result => { + match result { + Ok(data) => { + // Send current message + Self::send_message(&producer, data, &config); + + // Non-blocking consume and send more data, reduces select! scheduling overhead + // Limit batch size to ensure control signals are processed in time + let mut batch_count = 1; + while batch_count < MAX_BATCH_CONSUME_SIZE { + match data_receiver.try_recv() { + Ok(more_data) => { + Self::send_message(&producer, more_data, &config); + batch_count += 1; + } + Err(_) => break, + } + } + + // Flush after batch ends, ensures messages are sent to Kafka + Self::flush_producer(&producer); + } + Err(_) => { + log::info!("Data channel disconnected for topic: {}", config.topic); + break; + } + } + } + } + } else { + // ========== Paused state: block waiting only for control signals ========== + match control_receiver.recv() { + Ok(signal) => { + match Self::handle_control_signal( + signal, + &producer, + &data_receiver, + &state, + &config, + ) { + ControlAction::Continue => is_running = true, + ControlAction::Pause => is_running = false, + ControlAction::Exit => break, + } + } + Err(_) => { + log::warn!("Control channel disconnected for topic: {}", config.topic); + break; + } + } + } + } + + log::info!("Send thread exiting for topic: {}", config.topic); + } + + // ==================== Control Layer Functions ==================== + + /// Handle control signal (executed in runloop thread, manages state changes and checks uniformly) + fn handle_control_signal( + 
signal: SinkControlSignal, + producer: &ThreadedProducer, + data_receiver: &crossbeam_channel::Receiver, + state: &Arc>, + config: &KafkaProducerConfig, + ) -> ControlAction { + let current_state = state.lock().unwrap().clone(); + + match signal { + SinkControlSignal::Start { completion_flag } => { + // Only Initialized or Stopped state can Start + if !matches!( + current_state, + ComponentState::Initialized | ComponentState::Stopped + ) { + let error = format!("Cannot start in state: {:?}", current_state); + log::error!("{} for topic: {}", error, config.topic); + completion_flag.mark_error(error); + return ControlAction::Continue; + } + log::debug!("Sink start signal received for topic: {}", config.topic); + *state.lock().unwrap() = ComponentState::Running; + completion_flag.mark_completed(); + ControlAction::Continue + } + SinkControlSignal::Stop { completion_flag } => { + // Only Running or Checkpointing state can Stop + if !matches!( + current_state, + ComponentState::Running | ComponentState::Checkpointing + ) { + // Stop operation silently succeeds if state is wrong (idempotent) + log::debug!( + "Stop ignored in state: {:?} for topic: {}", + current_state, + config.topic + ); + completion_flag.mark_completed(); + return ControlAction::Pause; + } + log::info!("Sink stop signal received for topic: {}", config.topic); + *state.lock().unwrap() = ComponentState::Stopped; + completion_flag.mark_completed(); + ControlAction::Pause + } + SinkControlSignal::Close { completion_flag } => { + // Close can be executed in any state + log::info!("Sink close signal received for topic: {}", config.topic); + *state.lock().unwrap() = ComponentState::Closing; + Self::drain_remaining_data(producer, data_receiver, config); + Self::flush_producer(producer); + *state.lock().unwrap() = ComponentState::Closed; + completion_flag.mark_completed(); + ControlAction::Exit + } + SinkControlSignal::Checkpoint { + checkpoint_id, + completion_flag, + } => { + // Only Running state can 
Checkpoint + if !matches!(current_state, ComponentState::Running) { + let error = format!("Cannot take checkpoint in state: {:?}", current_state); + log::error!("{} for topic: {}", error, config.topic); + completion_flag.mark_error(error); + return ControlAction::Continue; + } + log::info!( + "Checkpoint {} started for topic: {}", + checkpoint_id, + config.topic + ); + *state.lock().unwrap() = ComponentState::Checkpointing; + Self::drain_remaining_data(producer, data_receiver, config); + Self::flush_producer(producer); + completion_flag.mark_completed(); + ControlAction::Continue + } + SinkControlSignal::CheckpointFinish { + checkpoint_id, + completion_flag, + } => { + // Only Checkpointing state can CheckpointFinish + if !matches!(current_state, ComponentState::Checkpointing) { + let error = format!("Cannot finish checkpoint in state: {:?}", current_state); + log::error!("{} for topic: {}", error, config.topic); + completion_flag.mark_error(error); + return ControlAction::Continue; + } + log::info!( + "Checkpoint {} finish for topic: {}", + checkpoint_id, + config.topic + ); + *state.lock().unwrap() = ComponentState::Running; + completion_flag.mark_completed(); + ControlAction::Continue + } + SinkControlSignal::Flush { completion_flag } => { + log::info!("Sink flush signal received for topic: {}", config.topic); + Self::drain_remaining_data(producer, data_receiver, config); + Self::flush_producer(producer); + completion_flag.mark_completed(); + ControlAction::Continue + } + } + } + + /// Drain all remaining data from data channel when closing, prevents leakage + fn drain_remaining_data( + producer: &ThreadedProducer, + data_receiver: &crossbeam_channel::Receiver, + config: &KafkaProducerConfig, + ) { + let mut drained_count = 0; + + // Non-blocking drain and send all remaining data + while let Ok(data) = data_receiver.try_recv() { + Self::send_message(producer, data, config); + drained_count += 1; + } + + if drained_count > 0 { + log::info!( + "Drained {} 
remaining messages before closing for topic: {}", + drained_count, + config.topic + ); + } + } + + /// Flush Kafka Producer + fn flush_producer(producer: &ThreadedProducer) { + let _ = producer.flush(std::time::Duration::from_millis(DEFAULT_FLUSH_TIMEOUT_MS)); + } + + // ==================== Data Layer Functions ==================== + + /// Send single message + /// + /// ThreadedProducer handles batching internally, no manual batching needed. + #[inline] + fn send_message( + producer: &ThreadedProducer, + data: BufferOrEvent, + config: &KafkaProducerConfig, + ) { + // Use into_buffer() to take ownership, avoids extra copy + if let Some(payload) = data.into_buffer() { + let payload_str = String::from_utf8_lossy(&payload); + log::info!( + "Sending to Kafka topic '{}': len={}, payload={}", + config.topic, + payload.len(), + payload_str + ); + + let mut record: BaseRecord<'_, (), Vec> = + BaseRecord::to(&config.topic).payload(&payload); + + if let Some(partition) = config.partition { + record = record.partition(partition); + } + + if let Err((e, _)) = producer.send(record) { + log::error!( + "Failed to send message to Kafka topic {}: {}", + config.topic, + e + ); + } + } + } +} + +// ==================== OutputSink Trait Implementation ==================== + +impl OutputSink for KafkaOutputSink { + // -------------------- init -------------------- + + fn init_with_context( + &mut self, + init_context: &crate::runtime::taskexecutor::InitContext, + ) -> Result<(), Box> { + // init_with_context is the only method that sets state in caller thread (runloop thread not started yet) + if !matches!(*self.state.lock().unwrap(), ComponentState::Uninitialized) { + return Ok(()); + } + + // Validate configuration + if self.config.bootstrap_servers.is_empty() { + return Err(Box::new(std::io::Error::new( + std::io::ErrorKind::InvalidInput, + "Kafka bootstrap_servers is empty", + ))); + } + if self.config.topic.is_empty() { + return Err(Box::new(std::io::Error::new( + 
std::io::ErrorKind::InvalidInput, + "Kafka topic is empty", + ))); + } + + // Create Channels + let (data_sender, data_receiver) = crossbeam_channel::bounded(DEFAULT_CHANNEL_CAPACITY); + let (control_sender, control_receiver) = crossbeam_channel::bounded(10); + self.data_sender = Some(data_sender); + self.control_sender = Some(control_sender); + + // Create Kafka producer and start thread + let producer = self.config.create_producer()?; + let config_clone = self.config.clone(); + let state_clone = self.state.clone(); + + let thread_name = format!("kafka-sink-{}-{}", self.sink_id, self.config.topic); + let thread_handle = std::thread::Builder::new() + .name(thread_name.clone()) + .spawn(move || { + Self::send_thread_loop( + producer, + data_receiver, + control_receiver, + state_clone, + config_clone, + ); + }) + .map_err(|e| -> Box { + Box::new(std::io::Error::other(format!( + "Failed to start thread: {}", + e + ))) + })?; + + // Register thread group to InitContext + use crate::runtime::processor::wasm::thread_pool::{ThreadGroup, ThreadGroupType}; + let mut output_thread_group = ThreadGroup::new( + ThreadGroupType::OutputSink(self.sink_id), + format!("OutputSink-{}", self.sink_id), + ); + output_thread_group.add_thread(thread_handle); + init_context.register_thread_group(output_thread_group); + + // Note: thread handle has been moved to thread group, no longer stored in send_thread + // When closing, thread management is done through TaskHandle + self.send_thread = None; + // init_with_context is the only place that sets state in caller thread + *self.state.lock().unwrap() = ComponentState::Initialized; + Ok(()) + } + + // -------------------- start -------------------- + + fn start(&mut self) -> Result<(), Box> { + // Don't check state in main thread, let runloop thread's handle_control_signal handle it + // Send signal to runloop thread, let runloop thread set state + let completion_flag = TaskCompletionFlag::new(); + if let Some(ref control_sender) = 
self.control_sender { + control_sender + .send(SinkControlSignal::Start { + completion_flag: completion_flag.clone(), + }) + .map_err(|e| -> Box { + Box::new(std::io::Error::other(format!( + "Failed to send start signal: {}", + e + ))) + })?; + } + + // Wait with timeout retry for runloop thread to complete + self.wait_with_retry(&completion_flag, "Start")?; + + log::debug!( + "KafkaOutputSink started: sink_id={}, topic={}", + self.sink_id, + self.config.topic + ); + Ok(()) + } + + // -------------------- stop -------------------- + + fn stop(&mut self) -> Result<(), Box> { + // Don't check state in main thread, let runloop thread's handle_control_signal handle it + let completion_flag = TaskCompletionFlag::new(); + if let Some(ref control_sender) = self.control_sender { + control_sender + .send(SinkControlSignal::Stop { + completion_flag: completion_flag.clone(), + }) + .map_err(|e| -> Box { + Box::new(std::io::Error::other(format!( + "Failed to send stop signal: {}", + e + ))) + })?; + } + + // Wait with timeout retry for runloop thread to complete + self.wait_with_retry(&completion_flag, "Stop")?; + + log::info!( + "KafkaOutputSink stopped: sink_id={}, topic={}", + self.sink_id, + self.config.topic + ); + Ok(()) + } + + // -------------------- checkpoint -------------------- + + fn take_checkpoint( + &mut self, + checkpoint_id: u64, + ) -> Result<(), Box> { + // Don't check state in main thread, let runloop thread's handle_control_signal handle it + let completion_flag = TaskCompletionFlag::new(); + if let Some(ref control_sender) = self.control_sender { + let signal = SinkControlSignal::Checkpoint { + checkpoint_id, + completion_flag: completion_flag.clone(), + }; + control_sender + .send(signal) + .map_err(|e| -> Box { + Box::new(std::io::Error::other(format!( + "Checkpoint signal failed: {}", + e + ))) + })?; + } + + self.wait_with_retry(&completion_flag, "Checkpoint")?; + + log::info!( + "Checkpoint {} started: sink_id={}, topic={}", + checkpoint_id, + 
self.sink_id, + self.config.topic + ); + Ok(()) + } + + fn finish_checkpoint( + &mut self, + checkpoint_id: u64, + ) -> Result<(), Box> { + // Don't check state in main thread, let runloop thread's handle_control_signal handle it + let completion_flag = TaskCompletionFlag::new(); + if let Some(ref control_sender) = self.control_sender { + let signal = SinkControlSignal::CheckpointFinish { + checkpoint_id, + completion_flag: completion_flag.clone(), + }; + control_sender + .send(signal) + .map_err(|e| -> Box { + Box::new(std::io::Error::other(format!( + "Failed to send checkpoint finish signal: {}", + e + ))) + })?; + } + + // Wait with timeout retry for runloop thread to complete + self.wait_with_retry(&completion_flag, "CheckpointFinish")?; + + log::info!( + "Checkpoint {} finished: sink_id={}, topic={}", + checkpoint_id, + self.sink_id, + self.config.topic + ); + Ok(()) + } + + // -------------------- close -------------------- + + fn close(&mut self) -> Result<(), Box> { + // State check (read only) + if matches!(*self.state.lock().unwrap(), ComponentState::Closed) { + return Ok(()); + } + + // Send signal to runloop thread, let runloop thread set state + let completion_flag = TaskCompletionFlag::new(); + if let Some(ref control_sender) = self.control_sender { + let signal = SinkControlSignal::Close { + completion_flag: completion_flag.clone(), + }; + if control_sender.send(signal).is_ok() { + // Wait with timeout retry for runloop thread to complete + // Close operation allows failure (thread may have exited), so ignore errors + let _ = self.wait_with_retry(&completion_flag, "Close"); + } + } + + // Note: thread handle has been moved to thread group, managed uniformly by TaskHandle + // No need to join here, thread group will wait uniformly in TaskHandle + + // Clean up resources + self.data_sender.take(); + self.data_receiver.take(); + self.control_sender.take(); + self.control_receiver.take(); + + log::info!( + "KafkaOutputSink closed: sink_id={}, topic={}", 
+ self.sink_id, + self.config.topic + ); + Ok(()) + } + + // -------------------- collect -------------------- + + fn collect(&mut self, data: BufferOrEvent) -> Result<(), Box> { + // Print current state + let state = self.state.lock().unwrap().clone(); + let data_sender_exists = self.data_sender.is_some(); + log::info!( + "KafkaOutputSink collect: sink_id={}, topic={}, state={:?}, data_sender_exists={}", + self.sink_id, + self.config.topic, + state, + data_sender_exists + ); + + // Don't check state in main thread, send data directly to channel + // If runloop is not running, data will be queued in channel + if let Some(ref sender) = self.data_sender { + sender + .send(data) + .map_err(|e| -> Box { + Box::new(std::io::Error::new( + std::io::ErrorKind::BrokenPipe, + format!("Send failed: {}", e), + )) + })?; + } + + Ok(()) + } + + // -------------------- restore_state -------------------- + + fn restore_state( + &mut self, + checkpoint_id: u64, + ) -> Result<(), Box> { + log::info!( + "Restoring state from checkpoint {} for topic: {}", + checkpoint_id, + self.config.topic + ); + Ok(()) + } + + // -------------------- flush -------------------- + + fn flush(&mut self) -> Result<(), Box> { + // State check: if already closed, return error directly + if matches!(*self.state.lock().unwrap(), ComponentState::Closed) { + return Err(Box::new(std::io::Error::new( + std::io::ErrorKind::NotConnected, + "Flush aborted: component already closed", + ))); + } + + // Send signal to runloop thread, let runloop thread handle it + let completion_flag = TaskCompletionFlag::new(); + if let Some(ref control_sender) = self.control_sender { + control_sender + .send(SinkControlSignal::Flush { + completion_flag: completion_flag.clone(), + }) + .map_err(|e| -> Box { + Box::new(std::io::Error::other(format!( + "Failed to send flush signal: {}", + e + ))) + })?; + } + + // Wait with timeout retry for runloop thread to complete + self.wait_with_retry(&completion_flag, "Flush")?; + + 
log::info!( + "KafkaOutputSink flushed: sink_id={}, topic={}", + self.sink_id, + self.config.topic + ); + Ok(()) + } + + // -------------------- box_clone -------------------- + + fn box_clone(&self) -> Box { + // Create a new KafkaOutputSink with the same config and sink_id + // Note: cloned sink is uninitialized, needs to call init_with_context + Box::new(KafkaOutputSink::new(self.config.clone(), self.sink_id)) + } +} diff --git a/src/runtime/output/protocol/kafka/producer_config.rs b/src/runtime/output/protocol/kafka/producer_config.rs new file mode 100644 index 00000000..e59166a0 --- /dev/null +++ b/src/runtime/output/protocol/kafka/producer_config.rs @@ -0,0 +1,103 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Kafka Producer Config - Kafka producer configuration structure +// +// Defines configuration options for Kafka output sink +// +// Note: Each output sink only supports one topic and one partition + +use std::collections::HashMap; + +/// KafkaProducerConfig - Kafka producer configuration +/// +/// Contains all configuration options for Kafka output sink +/// +/// Each output sink only supports one topic and one partition +#[derive(Debug, Clone)] +pub struct KafkaProducerConfig { + /// Bootstrap servers (server addresses) + /// Can be a single string (comma-separated) or a list of strings + pub bootstrap_servers: Vec, + /// Topic name (single) + pub topic: String, + /// Partition ID (single, optional, if not specified Kafka will assign automatically) + pub partition: Option, + /// Other configuration items (key-value pairs) + pub properties: HashMap, +} + +impl KafkaProducerConfig { + /// Create new Kafka producer configuration + /// + /// # Arguments + /// - `bootstrap_servers`: Kafka broker address list + /// - `topic`: Topic name (single) + /// - `partition`: Partition ID (single, optional) + /// - `properties`: Other configuration items + pub fn new( + bootstrap_servers: Vec, + topic: String, + partition: Option, + properties: HashMap, + ) -> Self { + Self { + bootstrap_servers, + topic, + partition, + properties, + } + } + + /// Get bootstrap servers string (comma-separated) + pub fn bootstrap_servers_str(&self) -> String { + self.bootstrap_servers.join(",") + } + + /// Create Kafka producer + /// + /// # Returns + /// - `Ok(ThreadedProducer)`: Successfully created producer + /// - `Err(...)`: Creation failed + pub fn create_producer( + &self, + ) -> Result< + rdkafka::producer::ThreadedProducer, + Box, + > { + use rdkafka::config::ClientConfig; + use rdkafka::producer::{DefaultProducerContext, ThreadedProducer}; + + let mut client_config = ClientConfig::new(); + + // Set bootstrap servers (required) + client_config.set("bootstrap.servers", 
self.bootstrap_servers_str()); + + // Apply user-defined configuration + for (key, value) in &self.properties { + client_config.set(key, value); + } + + // Create producer + let producer: ThreadedProducer = + client_config + .create() + .map_err(|e| -> Box { + Box::new(std::io::Error::other(format!( + "Failed to create Kafka producer: {}", + e + ))) + })?; + + Ok(producer) + } +} diff --git a/src/runtime/output/protocol/mod.rs b/src/runtime/output/protocol/mod.rs new file mode 100644 index 00000000..20e4d1c5 --- /dev/null +++ b/src/runtime/output/protocol/mod.rs @@ -0,0 +1,17 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Output Protocol - Output protocol module +// +// Provides implementations of various output protocols + +pub mod kafka; diff --git a/src/runtime/processor/mod.rs b/src/runtime/processor/mod.rs new file mode 100644 index 00000000..71fd4449 --- /dev/null +++ b/src/runtime/processor/mod.rs @@ -0,0 +1,16 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod wasm; + +#[cfg(feature = "python")] +pub mod python; diff --git a/src/runtime/processor/python/mod.rs b/src/runtime/processor/python/mod.rs new file mode 100644 index 00000000..8c4dc433 --- /dev/null +++ b/src/runtime/processor/python/mod.rs @@ -0,0 +1,22 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// python Processor module +// +// This module provides a python-specific processor implementation +// that wraps the wasm processor for executing python code compiled to wasm. + +pub mod python_host; +pub mod python_service; + +pub use python_host::get_python_engine_and_component; +pub use python_service::PythonService; diff --git a/src/runtime/processor/python/python_host.rs b/src/runtime/processor/python/python_host.rs new file mode 100644 index 00000000..f69227a8 --- /dev/null +++ b/src/runtime/processor/python/python_host.rs @@ -0,0 +1,336 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Python WASM Host +// +// This module provides python-specific wasm host implementation +// that manages a global engine and component for python wasm runtime. +// Configuration is used to specify paths instead of hardcoding. + +use crate::config::PythonConfig; +use std::path::PathBuf; +use std::sync::{Arc, OnceLock, RwLock}; +use wasmtime::{Engine, component::Component}; + +// Global Python WASM Engine (thread-safe, shareable) +static GLOBAL_PYTHON_ENGINE: OnceLock> = OnceLock::new(); + +// Global Python WASM Component (thread-safe, shareable) +static GLOBAL_PYTHON_COMPONENT: OnceLock> = OnceLock::new(); + +// Global Python Configuration (initialized once) +static GLOBAL_PYTHON_CONFIG: OnceLock> = OnceLock::new(); + +/// Initialize the Python host with configuration +/// +/// This must be called before `get_python_engine_and_component`. +/// The configuration specifies paths for WASM file and cache directory. +/// +/// # Arguments +/// - `config`: Python runtime configuration +/// +/// # Returns +/// - `Ok(())`: Initialization successful +/// - `Err(...)`: Configuration already set or validation failed +pub fn initialize_config(config: &PythonConfig) -> anyhow::Result<()> { + let wasm_path = config.wasm_path_buf(); + if !wasm_path.exists() { + return Err(anyhow::anyhow!( + "Python WASM file not found at: {}. 
Please ensure the file exists or build it first with: cd python/functionstream-runtime && make build", + wasm_path.display() + )); + } + + // Store configuration + match GLOBAL_PYTHON_CONFIG.set(RwLock::new(config.clone())) { + Ok(_) => { + log::info!( + "[Python Host] Configuration initialized: wasm_path={}, cache_dir={}, enable_cache={}", + config.wasm_path, + config.cache_dir, + config.enable_cache + ); + Ok(()) + } + Err(_) => { + // Configuration already set, check if it matches + if let Some(existing) = GLOBAL_PYTHON_CONFIG.get() { + let existing_config = existing + .read() + .map_err(|e| anyhow::anyhow!("Failed to read existing configuration: {}", e))?; + if existing_config.wasm_path == config.wasm_path + && existing_config.cache_dir == config.cache_dir + && existing_config.enable_cache == config.enable_cache + { + log::debug!("[Python Host] Configuration already set with same values"); + return Ok(()); + } + } + Err(anyhow::anyhow!( + "Python host configuration has already been initialized with different values" + )) + } + } +} + +/// Get the current Python configuration +/// +/// Returns the default configuration if not explicitly initialized. +fn get_config() -> PythonConfig { + GLOBAL_PYTHON_CONFIG + .get() + .and_then(|lock| lock.read().ok()) + .map(|config| config.clone()) + .unwrap_or_else(PythonConfig::default) +} + +/// Load Python WASM bytes from the configured path +fn load_python_wasm_bytes(config: &PythonConfig) -> anyhow::Result> { + let wasm_path = config.wasm_path_buf(); + + if !wasm_path.exists() { + return Err(anyhow::anyhow!( + "Python WASM file not found at: {}. 
Please ensure the file exists or build it first with: cd python/functionstream-runtime && make build", + wasm_path.display() + )); + } + + std::fs::read(&wasm_path).map_err(|e| { + anyhow::anyhow!( + "Failed to read Python WASM file from {}: {}", + wasm_path.display(), + e + ) + }) +} + +/// Get or create global Python WASM Engine +/// +/// The engine is initialized on first call and reused for all subsequent calls. +fn get_global_python_engine() -> anyhow::Result> { + if let Some(engine) = GLOBAL_PYTHON_ENGINE.get() { + return Ok(Arc::clone(engine)); + } + + let engine = GLOBAL_PYTHON_ENGINE.get_or_init(|| { + let engine_start = std::time::Instant::now(); + let mut config = wasmtime::Config::new(); + config.wasm_component_model(true); + config.async_support(false); + config.cranelift_opt_level(wasmtime::OptLevel::Speed); + config.debug_info(false); + config.generate_address_map(false); + config.parallel_compilation(true); + + let engine = Engine::new(&config).unwrap_or_else(|e| { + panic!("Failed to create global Python WASM engine: {}", e); + }); + + let engine_elapsed = engine_start.elapsed().as_secs_f64(); + log::debug!( + "[Python Host] Global Engine created: {:.3}s", + engine_elapsed + ); + + Arc::new(engine) + }); + + Ok(Arc::clone(engine)) +} + +/// Load precompiled component from cache if available +fn load_precompiled_component(engine: &Engine, config: &PythonConfig) -> Option { + if !config.enable_cache { + log::debug!("[Python Host] Component caching is disabled"); + return None; + } + + let cache_path = config.cwasm_cache_path(); + + if !cache_path.exists() { + log::debug!( + "[Python Host] No cached component found at: {}", + cache_path.display() + ); + return None; + } + + match std::fs::read(&cache_path) { + Ok(precompiled_bytes) => { + log::debug!( + "[Python Host] Loading precompiled component from cache: {}", + cache_path.display() + ); + match unsafe { Component::deserialize(engine, &precompiled_bytes) } { + Ok(component) => { + log::debug!( + 
"[Python Host] Precompiled component loaded successfully from cache (size: {} KB)", + precompiled_bytes.len() / 1024 + ); + return Some(component); + } + Err(e) => { + log::warn!( + "[Python Host] Failed to deserialize cached component: {}. Removing invalid cache and falling back to compilation.", + e + ); + // Remove invalid cache file + let _ = std::fs::remove_file(&cache_path); + } + } + } + Err(e) => { + log::warn!( + "[Python Host] Failed to read cached component: {}. Falling back to compilation.", + e + ); + } + } + + None +} + +/// Save precompiled component to cache +fn save_precompiled_component( + engine: &Engine, + wasm_bytes: &[u8], + config: &PythonConfig, +) -> anyhow::Result { + if !config.enable_cache { + return Err(anyhow::anyhow!("Component caching is disabled")); + } + + let cache_dir = config.cache_dir_buf(); + let cache_path = config.cwasm_cache_path(); + + // Create cache directory if it doesn't exist + std::fs::create_dir_all(&cache_dir).map_err(|e| { + anyhow::anyhow!( + "Failed to create cache directory {}: {}", + cache_dir.display(), + e + ) + })?; + + log::debug!("[Python Host] Precompiling component for cache..."); + + // Precompile component + let precompiled_bytes = engine + .precompile_component(wasm_bytes) + .map_err(|e| anyhow::anyhow!("Failed to precompile component: {}", e))?; + + // Write to cache file + std::fs::write(&cache_path, &precompiled_bytes).map_err(|e| { + anyhow::anyhow!( + "Failed to write cached component to {}: {}", + cache_path.display(), + e + ) + })?; + + log::debug!( + "[Python Host] Cached precompiled component to {} (size: {} KB)", + cache_path.display(), + precompiled_bytes.len() / 1024 + ); + + Ok(cache_path) +} + +/// Get or create global Python WASM Component +/// +/// The component is loaded from cache if available, otherwise compiled from the WASM file. +/// If cache doesn't exist and caching is enabled, the component is compiled and saved to cache. 
+/// Configuration paths are used instead of hardcoded paths. +fn get_global_python_component() -> anyhow::Result> { + if let Some(component) = GLOBAL_PYTHON_COMPONENT.get() { + return Ok(Arc::clone(component)); + } + + let config = get_config(); + + let component = GLOBAL_PYTHON_COMPONENT.get_or_init(|| { + let component_start = std::time::Instant::now(); + let engine = get_global_python_engine().unwrap_or_else(|e| { + panic!("Failed to get global Python engine: {}", e); + }); + + // Try to load precompiled component from cache first + if let Some(precompiled) = load_precompiled_component(&engine, &config) { + let component_elapsed = component_start.elapsed().as_secs_f64(); + log::debug!( + "[Python Host] Precompiled component loaded from cache: {:.3}s", + component_elapsed + ); + return Arc::new(precompiled); + } + + // Cache doesn't exist or is invalid, compile from WASM file + log::debug!( + "[Python Host] Loading Python WASM component from: {}", + config.wasm_path + ); + let wasm_bytes = load_python_wasm_bytes(&config).unwrap_or_else(|e| { + panic!("Failed to load Python WASM bytes: {}", e); + }); + + let compile_start = std::time::Instant::now(); + let component = Component::from_binary(&engine, &wasm_bytes).unwrap_or_else(|e| { + let error_msg = format!("Failed to parse Python WASM component: {}", e); + log::error!("{}", error_msg); + log::error!( + "WASM bytes preview (first 100 bytes): {:?}", + wasm_bytes.iter().take(100).collect::>() + ); + panic!("{}", error_msg); + }); + + let compile_elapsed = compile_start.elapsed().as_secs_f64(); + log::debug!( + "[Python Host] Component compiled: {:.3}s (size: {} KB)", + compile_elapsed, + wasm_bytes.len() / 1024 + ); + + // Save precompiled component to cache for future use + if config.enable_cache { + if let Err(e) = save_precompiled_component(&engine, &wasm_bytes, &config) { + log::warn!( + "[Python Host] Failed to save precompiled component to cache: {}", + e + ); + log::warn!("[Python Host] Component will be 
recompiled on next run"); + } + } + + let component_elapsed = component_start.elapsed().as_secs_f64(); + log::debug!("[Python Host] Component ready: {:.3}s", component_elapsed); + + Arc::new(component) + }); + + Ok(Arc::clone(component)) +} + +/// Get global Python WASM Engine and Component +/// +/// This function returns both the engine and component, ensuring they are initialized. +/// The component is loaded from the configured path on first call. +/// +/// # Important +/// Call `initialize_config` before this function to set custom paths. +/// If not called, default configuration will be used. +pub fn get_python_engine_and_component() -> anyhow::Result<(Arc, Arc)> { + let engine = get_global_python_engine()?; + let component = get_global_python_component()?; + Ok((engine, component)) +} diff --git a/src/runtime/processor/python/python_service.rs b/src/runtime/processor/python/python_service.rs new file mode 100644 index 00000000..3fde05da --- /dev/null +++ b/src/runtime/processor/python/python_service.rs @@ -0,0 +1,63 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Python Service +// +// This module provides a service for initializing Python WASM runtime at startup +// with configuration support + +use crate::config::GlobalConfig; +use anyhow::{Context, Result}; +use log::info; + +/// Python Service for initializing Python WASM runtime +pub struct PythonService; + +impl PythonService { + /// Initialize Python WASM runtime with configuration + /// + /// This method: + /// 1. Initializes the configuration for Python host + /// 2. Validates that the WASM file exists at the configured path + /// 3. Loads and compiles the Python WASM component (or loads from cache) + /// + /// # Arguments + /// - `config`: Global configuration containing Python runtime settings + /// + /// # Returns + /// - `Ok(())`: Initialization successful + /// - `Err(...)`: Initialization failed (e.g., WASM file not found) + pub fn initialize(config: &GlobalConfig) -> Result<()> { + info!("Initializing Python WASM runtime..."); + + let python_config = &config.python; + + info!( + "Python WASM configuration: wasm_path={}, cache_dir={}, enable_cache={}", + python_config.wasm_path, python_config.cache_dir, python_config.enable_cache + ); + + // Initialize configuration for Python host + // This validates the WASM file exists and stores the configuration + super::python_host::initialize_config(python_config) + .context("Failed to initialize Python host configuration")?; + + // Pre-initialize the Python WASM engine and component + // This will load and compile the WASM component, or load from cache if available + let (_engine, _component) = super::python_host::get_python_engine_and_component() + .context("Failed to initialize Python WASM engine and component")?; + + info!("Python WASM runtime initialized successfully"); + + Ok(()) + } +} diff --git a/src/runtime/processor/wasm/mod.rs b/src/runtime/processor/wasm/mod.rs new file mode 100644 index 00000000..73e58b22 --- /dev/null +++ b/src/runtime/processor/wasm/mod.rs @@ -0,0 +1,18 @@ +// Licensed under the 
Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod thread_pool; +pub mod wasm_cache; +pub mod wasm_host; +pub mod wasm_processor; +pub mod wasm_processor_trait; +pub mod wasm_task; diff --git a/src/runtime/processor/wasm/thread_pool.rs b/src/runtime/processor/wasm/thread_pool.rs new file mode 100644 index 00000000..68c7e77a --- /dev/null +++ b/src/runtime/processor/wasm/thread_pool.rs @@ -0,0 +1,543 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::runtime::common::ComponentState; +use crate::runtime::processor::wasm::wasm_task::WasmTask; +use std::collections::HashMap; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::{Arc, Mutex}; +use std::thread; +use std::time::Duration; + +pub struct TaskThreadPool { + tasks: Arc>>, + shutdown: Arc, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum ThreadGroupType { + MainRunloop, + InputSource(usize), + OutputSink(usize), + Cleanup, +} + +pub struct ThreadInfo { + handle: thread::JoinHandle<()>, + is_running: Arc, +} + +impl ThreadInfo { + fn new(handle: thread::JoinHandle<()>) -> Self { + Self { + handle, + is_running: Arc::new(AtomicBool::new(true)), + } + } + + fn is_finished(&self) -> bool { + self.handle.is_finished() + } + + fn join(self) -> Result<(), Box> { + self.is_running.store(false, Ordering::Relaxed); + self.handle + .join() + .map_err(|e| format!("Thread join error: {:?}", e))?; + Ok(()) + } +} + +pub struct ThreadGroup { + pub group_type: ThreadGroupType, + pub group_name: String, + pub threads: Vec, + pub is_running: Arc, +} + +impl ThreadGroup { + pub fn new(group_type: ThreadGroupType, group_name: String) -> Self { + Self { + group_type, + group_name, + threads: Vec::new(), + is_running: Arc::new(AtomicBool::new(true)), + } + } + + pub fn add_thread(&mut self, handle: thread::JoinHandle<()>) { + self.threads.push(ThreadInfo::new(handle)); + } + + pub fn is_finished(&self) -> bool { + self.threads.is_empty() || self.threads.iter().all(|t| t.is_finished()) + } + + pub fn thread_count(&self) -> usize { + self.threads.len() + } + + pub fn running_thread_count(&self) -> usize { + self.threads.iter().filter(|t| !t.is_finished()).count() + } + + pub fn join_all(&mut self) -> Result<(), Box> { + self.is_running.store(false, Ordering::Relaxed); + for thread_info in std::mem::take(&mut self.threads) { + thread_info.join()?; + } + Ok(()) + } + + pub fn join_all_with_timeout( + &mut self, + timeout: Duration, + ) -> 
Result<(), Box> { + let start = std::time::Instant::now(); + self.is_running.store(false, Ordering::Relaxed); + + let mut remaining_threads = std::mem::take(&mut self.threads); + while !remaining_threads.is_empty() { + if start.elapsed() > timeout { + return Err(format!( + "Timeout waiting for {} threads in group '{}'", + remaining_threads.len(), + self.group_name + ) + .into()); + } + + remaining_threads.retain(|t| if t.is_finished() { false } else { true }); + + if !remaining_threads.is_empty() { + thread::sleep(Duration::from_millis(10)); + } + } + + Ok(()) + } +} + +struct TaskHandle { + task: Arc>, + thread_groups: Vec, +} + +impl TaskHandle { + fn new(task: Arc>) -> Self { + Self { + task, + thread_groups: Vec::new(), + } + } + + fn add_thread_group(&mut self, thread_group: ThreadGroup) { + self.thread_groups.push(thread_group); + } + + fn get_all_thread_groups(&self) -> &[ThreadGroup] { + &self.thread_groups + } + + fn join_all_threads( + &mut self, + timeout: Option, + ) -> Result<(), Box> { + if let Some(main_thread) = self + .thread_groups + .iter_mut() + .find(|g| matches!(g.group_type, ThreadGroupType::MainRunloop)) + { + if let Some(timeout) = timeout { + let _ = main_thread.join_all_with_timeout(timeout); + } else { + let _ = main_thread.join_all(); + } + } + + for thread_group in &mut self.thread_groups { + if let Some(timeout) = timeout { + let _ = thread_group.join_all_with_timeout(timeout); + } else { + let _ = thread_group.join_all(); + } + } + + Ok(()) + } +} + +impl TaskThreadPool { + pub fn new() -> Self { + Self { + tasks: Arc::new(Mutex::new(HashMap::new())), + shutdown: Arc::new(AtomicBool::new(false)), + } + } + + pub fn submit(&self, task: Arc>) -> Result> { + if self.shutdown.load(Ordering::Relaxed) { + return Err("Thread pool is shutdown".into()); + } + + let task_id = { + let task_guard = task.lock().unwrap(); + task_guard.get_name().to_string() + }; + + let tasks_clone = self.tasks.clone(); + let task_id_clone = task_id.clone(); + let 
shutdown_flag = self.shutdown.clone(); + + let task_arc_for_cleanup = task.clone(); + let cleanup_thread = thread::Builder::new() + .name(format!("TaskCleanup-{}", task_id_clone)) + .spawn(move || { + Self::cleanup_task_thread( + task_arc_for_cleanup, + tasks_clone, + task_id_clone, + shutdown_flag, + ); + }) + .map_err(|e| format!("Failed to spawn cleanup thread: {}", e))?; + + let mut cleanup_thread_group = + ThreadGroup::new(ThreadGroupType::Cleanup, format!("TaskCleanup-{}", task_id)); + cleanup_thread_group.add_thread(cleanup_thread); + + let thread_groups = { + let mut task_guard = task.lock().unwrap(); + task_guard.take_thread_groups() + }; + + let mut tasks = self.tasks.lock().unwrap(); + let mut handle = TaskHandle::new(task); + + if let Some(groups) = thread_groups { + for group in groups { + handle.add_thread_group(group); + } + } + + handle.add_thread_group(cleanup_thread_group); + + tasks.insert(task_id.clone(), handle); + + Ok(task_id) + } + + fn cleanup_task_thread( + task_arc: Arc>, + tasks: Arc>>, + task_id: String, + shutdown_flag: Arc, + ) { + loop { + let state = { + let task_guard = task_arc.lock().unwrap(); + task_guard.get_state() + }; + + if matches!(state, ComponentState::Closed | ComponentState::Error { .. 
}) { + break; + } + + thread::sleep(Duration::from_millis(100)); + } + + let mut attempts = 0; + const MAX_ATTEMPTS: usize = 50; + + loop { + let task_guard = task_arc.lock().unwrap(); + let state = task_guard.get_state(); + drop(task_guard); + + if matches!(state, ComponentState::Closed) { + break; + } + + attempts += 1; + if attempts >= MAX_ATTEMPTS { + log::warn!("Timeout waiting for task {} to close", task_id); + break; + } + + thread::sleep(Duration::from_millis(100)); + } + + if !shutdown_flag.load(Ordering::Relaxed) { + let mut tasks_guard = tasks.lock().unwrap(); + tasks_guard.remove(&task_id); + } + } + + pub fn get_task(&self, task_id: &str) -> Option>> { + let tasks = self.tasks.lock().unwrap(); + tasks.get(task_id).map(|handle| handle.task.clone()) + } + + pub fn cancel_task(&self, task_id: &str) -> Result<(), Box> { + if let Some(task) = self.get_task(task_id) { + let task_guard = task.lock().unwrap(); + task_guard.cancel().map_err(|e| { + Box::new(std::io::Error::other(format!( + "Failed to cancel task: {}", + e + ))) as Box + })?; + Ok(()) + } else { + Err(format!("Task {} not found", task_id).into()) + } + } + + pub fn get_all_task_ids(&self) -> Vec { + let tasks = self.tasks.lock().unwrap(); + tasks.keys().cloned().collect() + } + + pub fn task_count(&self) -> usize { + let tasks = self.tasks.lock().unwrap(); + tasks.len() + } + + pub fn wait_for_all_tasks( + &self, + timeout: Option, + ) -> Result<(), Box> { + let start = std::time::Instant::now(); + loop { + let count = self.task_count(); + if count == 0 { + return Ok(()); + } + + if let Some(timeout) = timeout + && start.elapsed() > timeout + { + return Err(format!("Timeout waiting for {} tasks to complete", count).into()); + } + + thread::sleep(Duration::from_millis(100)); + } + } + + pub fn cleanup_finished_threads(&self) -> usize { + let mut cleaned_count = 0; + let tasks = self.tasks.lock().unwrap(); + let task_ids: Vec = tasks.keys().cloned().collect(); + drop(tasks); + + for task_id in 
task_ids { + if let Some(task_arc) = self.get_task(&task_id) { + let task_guard = task_arc.lock().unwrap(); + + let state = task_guard.get_state(); + let is_finished = + matches!(state, ComponentState::Closed | ComponentState::Error { .. }); + + if is_finished { + cleaned_count += 1; + } + drop(task_guard); + } + } + + cleaned_count + } + + pub fn force_cleanup_all_threads( + &self, + timeout: Duration, + ) -> Result> { + let mut cleaned_count = 0; + let tasks = self.tasks.lock().unwrap(); + let task_ids: Vec = tasks.keys().cloned().collect(); + drop(tasks); + + let start = std::time::Instant::now(); + for task_id in task_ids { + if start.elapsed() > timeout { + break; + } + + if let Some(task_arc) = self.get_task(&task_id) { + let task_guard = task_arc.lock().unwrap(); + let state = task_guard.get_state(); + if matches!(state, ComponentState::Closed | ComponentState::Error { .. }) { + cleaned_count += 1; + } + drop(task_guard); + } + } + + Ok(cleaned_count) + } + + pub fn shutdown(&self) -> Result<(), Box> { + self.shutdown.store(true, Ordering::Relaxed); + + let task_ids: Vec = self.get_all_task_ids(); + for task_id in task_ids { + let _ = self.cancel_task(&task_id); + } + + self.wait_for_all_tasks(Some(Duration::from_secs(30)))?; + + let cleaned = self.cleanup_finished_threads(); + if cleaned > 0 { + log::info!("Cleaned up {} finished threads", cleaned); + } + + let remaining = self.task_count(); + if remaining > 0 { + log::warn!("{} tasks still remain, forcing thread cleanup", remaining); + let _ = self.force_cleanup_all_threads(Duration::from_secs(5)); + } + + self.wait_for_cleanup_threads(Duration::from_secs(10))?; + + let final_count = self.task_count(); + if final_count > 0 { + return Err(format!("{} tasks still remain after shutdown", final_count).into()); + } + + Ok(()) + } + + fn wait_for_cleanup_threads( + &self, + timeout: Duration, + ) -> Result<(), Box> { + let start = std::time::Instant::now(); + + loop { + let tasks = self.tasks.lock().unwrap(); + 
let all_cleanup_done = tasks.values().all(|handle| { + handle + .thread_groups + .iter() + .find(|g| matches!(g.group_type, ThreadGroupType::Cleanup)) + .map(|g| g.is_finished()) + .unwrap_or(true) + }); + drop(tasks); + + if all_cleanup_done { + return Ok(()); + } + + if start.elapsed() > timeout { + return Err("Timeout waiting for cleanup threads".into()); + } + + thread::sleep(Duration::from_millis(100)); + } + } + + pub fn shutdown_now(&self) { + self.shutdown.store(true, Ordering::Relaxed); + + let task_ids: Vec = self.get_all_task_ids(); + for task_id in task_ids { + let _ = self.cancel_task(&task_id); + } + + let _ = self.cleanup_finished_threads(); + } + + pub fn get_thread_health_status(&self) -> ThreadHealthStatus { + let tasks = self.tasks.lock().unwrap(); + let mut total_tasks = 0; + let mut alive_threads = 0; + let mut finished_threads = 0; + let mut zombie_threads = 0; + + for handle in tasks.values() { + total_tasks += 1; + + let mut task_alive_threads = 0; + let mut task_finished_threads = 0; + + for thread_group in &handle.thread_groups { + let running = thread_group.running_thread_count(); + let finished = thread_group.thread_count() - running; + task_alive_threads += running; + task_finished_threads += finished; + } + + if let Ok(task_guard) = handle.task.try_lock() { + let state = task_guard.get_state(); + let is_finished = + matches!(state, ComponentState::Closed | ComponentState::Error { .. 
}); + + if task_alive_threads > 0 { + alive_threads += task_alive_threads; + if is_finished { + zombie_threads += task_alive_threads; + } + } + if task_finished_threads > 0 { + finished_threads += task_finished_threads; + } + } + } + + ThreadHealthStatus { + total_tasks, + alive_threads, + finished_threads, + zombie_threads, + } + } + + pub fn is_shutdown(&self) -> bool { + self.shutdown.load(Ordering::Relaxed) + } +} + +#[derive(Debug, Clone)] +pub struct ThreadHealthStatus { + pub total_tasks: usize, + pub alive_threads: usize, + pub finished_threads: usize, + pub zombie_threads: usize, +} + +impl ThreadHealthStatus { + pub fn has_zombie_threads(&self) -> bool { + self.zombie_threads > 0 + } + + pub fn all_threads_recycled(&self) -> bool { + self.total_tasks == 0 + || (self.alive_threads == 0 && self.finished_threads == self.total_tasks) + } +} + +impl Default for TaskThreadPool { + fn default() -> Self { + Self::new() + } +} + +pub struct GlobalTaskThreadPool; + +impl GlobalTaskThreadPool { + pub fn get_or_create() -> Arc { + static POOL: std::sync::OnceLock> = std::sync::OnceLock::new(); + POOL.get_or_init(|| Arc::new(TaskThreadPool::default())) + .clone() + } +} diff --git a/src/runtime/processor/wasm/wasm_cache.rs b/src/runtime/processor/wasm/wasm_cache.rs new file mode 100644 index 00000000..ec67e58a --- /dev/null +++ b/src/runtime/processor/wasm/wasm_cache.rs @@ -0,0 +1,346 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::borrow::Cow; +use std::fs; +use std::io::Write; +use std::path::{Path, PathBuf}; +use std::sync::{Arc, OnceLock, RwLock}; +use std::time::{SystemTime, UNIX_EPOCH}; + +#[cfg(feature = "incremental-cache")] +use wasmtime::CacheStore; + +#[derive(Debug, Clone)] +pub struct WasmCacheConfig { + pub enabled: bool, + pub cache_dir: PathBuf, + pub max_size: u64, +} + +static GLOBAL_CACHE_CONFIG: OnceLock = OnceLock::new(); + +pub fn set_cache_config(config: WasmCacheConfig) { + let _ = GLOBAL_CACHE_CONFIG.set(config); +} + +pub fn get_cache_config() -> Option<&'static WasmCacheConfig> { + GLOBAL_CACHE_CONFIG.get() +} + +pub fn is_cache_enabled() -> bool { + GLOBAL_CACHE_CONFIG + .get() + .map(|config| config.enabled) + .unwrap_or(false) +} + +#[cfg(feature = "incremental-cache")] +#[derive(Debug)] +struct CacheEntry { + size: u64, + last_accessed: u64, +} + +#[cfg(feature = "incremental-cache")] +struct LruCacheState { + entries: lru::LruCache, CacheEntry>, + total_size: u64, + max_size: u64, +} + +impl std::fmt::Debug for LruCacheState { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("LruCacheState") + .field("total_size", &self.total_size) + .field("max_size", &self.max_size) + .field("entry_count", &self.entries.len()) + .finish() + } +} + +#[cfg(feature = "incremental-cache")] +#[derive(Debug)] +pub struct FileCacheStore { + cache_dir: PathBuf, + state: Arc>, +} + +#[cfg(feature = "incremental-cache")] +impl FileCacheStore { + pub fn new>(cache_dir: P, max_size: u64) -> anyhow::Result { + let cache_dir = cache_dir.as_ref().to_path_buf(); + + fs::create_dir_all(&cache_dir).map_err(|e| { + anyhow::anyhow!( + "Failed to create cache directory {}: {}", + cache_dir.display(), + e + ) + })?; + + let state = Arc::new(RwLock::new(LruCacheState { + entries: lru::LruCache::unbounded(), + total_size: 0, + max_size, + })); + + let store = Self { + cache_dir: cache_dir.clone(), + state, + }; + store.scan_existing_files()?; + + 
log::info!( + "FileCacheStore initialized at: {} (max_size: {} bytes)", + cache_dir.display(), + max_size + ); + + Ok(store) + } + + fn scan_existing_files(&self) -> anyhow::Result<()> { + let mut state = self.state.write().unwrap(); + let mut total_size = 0u64; + + if let Ok(entries) = fs::read_dir(&self.cache_dir) { + for entry in entries.flatten() { + let path = entry.path(); + if path.is_file() && path.extension() != Some(std::ffi::OsStr::new("tmp")) { + if let Ok(metadata) = fs::metadata(&path) { + let size = metadata.len(); + let modified = metadata + .modified() + .unwrap_or_else(|_| SystemTime::now()) + .duration_since(UNIX_EPOCH) + .unwrap_or_default() + .as_secs(); + + if let Some(file_name) = path.file_name().and_then(|n| n.to_str()) { + if let Some(key) = self.filename_to_key(file_name) { + state.entries.put( + key, + CacheEntry { + size, + last_accessed: modified, + }, + ); + total_size += size; + } + } + } + } + } + } + + state.total_size = total_size; + log::info!( + "Scanned existing cache files: {} entries, {} bytes", + state.entries.len(), + total_size + ); + + Ok(()) + } + + fn filename_to_key(&self, filename: &str) -> Option> { + use base64::{Engine as _, engine::general_purpose}; + general_purpose::URL_SAFE_NO_PAD.decode(filename).ok() + } + + fn key_to_filename(&self, key: &[u8]) -> String { + use base64::{Engine as _, engine::general_purpose}; + general_purpose::URL_SAFE_NO_PAD.encode(key) + } + + fn get_file_path(&self, key: &[u8]) -> PathBuf { + let filename = self.key_to_filename(key); + self.cache_dir.join(filename) + } + + fn evict_until_under_limit(&self, required_space: u64) { + let mut files_to_delete = Vec::new(); + + { + let mut state = self.state.write().unwrap(); + let target_size = state.max_size.saturating_sub(required_space); + + while state.total_size > target_size { + if let Some((key, entry)) = state.entries.pop_lru() { + let file_path = self.get_file_path(&key); + files_to_delete.push((file_path, entry.size)); + 
state.total_size = state.total_size.saturating_sub(entry.size); + } else { + break; + } + } + } + + for (file_path, size) in files_to_delete { + if fs::remove_file(&file_path).is_ok() { + log::debug!( + "Evicted cache entry: {} ({} bytes)", + file_path.display(), + size + ); + } else { + log::warn!("Failed to remove cache file: {}", file_path.display()); + let mut state = self.state.write().unwrap(); + state.total_size = state.total_size.saturating_add(size); + } + } + } + + fn update_access_time(&self, key: &[u8]) { + let mut state = self.state.write().unwrap(); + if let Some(entry) = state.entries.get_mut(key) { + entry.last_accessed = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap_or_default() + .as_secs(); + } + } +} + +#[cfg(feature = "incremental-cache")] +impl CacheStore for FileCacheStore { + fn get(&self, key: &[u8]) -> Option> { + let file_path = self.get_file_path(key); + + if !file_path.exists() { + return None; + } + + let data = match fs::read(&file_path) { + Ok(data) => data, + Err(e) => { + log::warn!("Failed to read cache file {}: {}", file_path.display(), e); + return None; + } + }; + + self.update_access_time(key); + log::debug!("Cache hit: {} ({} bytes)", file_path.display(), data.len()); + Some(Cow::Owned(data)) + } + + fn insert(&self, key: &[u8], value: Vec) -> bool { + let value_size = value.len() as u64; + let file_path = self.get_file_path(key); + let temp_path = file_path.with_extension(".tmp"); + + loop { + let mut state = self.state.write().unwrap(); + + if value_size > state.max_size { + log::warn!( + "Cache entry size {} exceeds max cache size {}, skipping", + value_size, + state.max_size + ); + return false; + } + + let existing_size = state.entries.get(key).map(|e| e.size).unwrap_or(0); + + let additional_size = value_size.saturating_sub(existing_size); + + if state.total_size + additional_size <= state.max_size { + drop(state); + break; + } + + drop(state); + self.evict_until_under_limit(value_size); + } + + if let 
Some(parent) = file_path.parent() { + if let Err(e) = fs::create_dir_all(parent) { + log::warn!( + "Failed to create cache directory {}: {}", + parent.display(), + e + ); + return false; + } + } + + let write_result = fs::File::create(&temp_path) + .and_then(|mut file| file.write_all(&value)) + .and_then(|_| fs::rename(&temp_path, &file_path)); + + match write_result { + Ok(_) => { + let mut state = self.state.write().unwrap(); + let now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap_or_default() + .as_secs(); + + let existing_size_final = state.entries.get(key).map(|e| e.size).unwrap_or(0); + + state.total_size = + state.total_size.saturating_sub(existing_size_final) + value_size; + state.entries.put( + key.to_vec(), + CacheEntry { + size: value_size, + last_accessed: now, + }, + ); + + log::debug!( + "Cache insert: {} ({} bytes), total: {} bytes", + file_path.display(), + value_size, + state.total_size + ); + true + } + Err(e) => { + log::warn!("Failed to write cache file {}: {}", file_path.display(), e); + let _ = fs::remove_file(&temp_path); + false + } + } + } +} + +#[cfg(feature = "incremental-cache")] +pub fn create_cache_store() -> Option> { + if !is_cache_enabled() { + log::info!("wasm cache is disabled in configuration"); + return None; + } + + let config = get_cache_config()?; + match FileCacheStore::new(&config.cache_dir, config.max_size) { + Ok(store) => { + log::info!( + "wasm cache store created at: {} (max_size: {} bytes)", + config.cache_dir.display(), + config.max_size + ); + Some(Arc::new(store)) + } + Err(e) => { + log::warn!("Failed to create wasm cache store: {}", e); + None + } + } +} + +#[cfg(not(feature = "incremental-cache"))] +pub fn create_cache_store() -> Option<()> { + None +} diff --git a/src/runtime/processor/wasm/wasm_host.rs b/src/runtime/processor/wasm/wasm_host.rs new file mode 100644 index 00000000..4b89a84d --- /dev/null +++ b/src/runtime/processor/wasm/wasm_host.rs @@ -0,0 +1,521 @@ +// Licensed under the Apache 
License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::runtime::buffer_and_event::BufferOrEvent; +use crate::runtime::output::OutputSink; +use crate::runtime::processor::wasm::wasm_cache; +use crate::storage::state_backend::{StateStore, StateStoreFactory}; +use std::sync::{Arc, OnceLock}; +use wasmtime::component::{Component, Linker, Resource, bindgen}; +use wasmtime::{Config, Engine, OptLevel, Store, WasmBacktraceDetails}; +use wasmtime_wasi::{ResourceTable, WasiCtx, WasiCtxBuilder, WasiView}; + +static GLOBAL_ENGINE: OnceLock> = OnceLock::new(); + +fn enable_incremental_compilation(config: &mut Config) -> bool { + if !wasm_cache::is_cache_enabled() { + return false; + } + + #[cfg(feature = "incremental-cache")] + { + if let Some(cache_store) = wasm_cache::create_cache_store() { + let _ = config.enable_incremental_compilation(cache_store); + return true; + } + } + + config.cache_config_load_default().is_ok() +} + +fn get_global_engine(_wasm_size: usize) -> anyhow::Result> { + if let Some(engine) = GLOBAL_ENGINE.get() { + return Ok(Arc::clone(engine)); + } + + let engine = GLOBAL_ENGINE.get_or_init(|| { + let mut config = Config::new(); + config.wasm_component_model(true); + config.async_support(false); + config.cranelift_opt_level(OptLevel::Speed); + config.debug_info(false); + config.generate_address_map(false); + config.parallel_compilation(true); + enable_incremental_compilation(&mut config); + config.debug_info(true); + 
// --- tail of get_global_engine(): the function head (Config construction and
// --- the OnceLock/get_or_init call) lies above this chunk.
    config.wasm_backtrace_details(WasmBacktraceDetails::Environment);

        // The process-wide engine is created exactly once; failure here is
        // unrecoverable, so panic with the underlying error.
        let engine = Engine::new(&config).unwrap_or_else(|e| {
            panic!("Failed to create global wasm engine: {}", e);
        });

        Arc::new(engine)
    });

    Ok(Arc::clone(engine))
}

// Generate typed host bindings for the `processor` WIT world. The two kv
// resources are represented host-side by the handle structs declared below.
bindgen!({
    world: "processor",
    path: "wit/processor.wit",
    async: false,
    with: {
        "functionstream:core/kv/store": FunctionStreamStoreHandle,
        "functionstream:core/kv/iterator": FunctionStreamIteratorHandle,
    }
});

use functionstream::core::kv::{self, ComplexKey, Error, HostIterator, HostStore};

/// Host-side backing object for the WIT `kv/store` resource.
///
/// NOTE(review): the generic parameters in this chunk were stripped by the
/// corrupted diff; `dyn StateStore` is reconstructed from the
/// `factory.new_state_store(..)` call site — confirm the actual trait name.
pub struct FunctionStreamStoreHandle {
    /// Store name requested by the guest (passed through to the factory).
    pub name: String,
    /// Underlying key/value backend created by the task's state-store factory.
    pub state_store: Box<dyn StateStore>,
}

impl Drop for FunctionStreamStoreHandle {
    // Intentionally empty: the boxed backend drops itself. Kept explicit so
    // future flush/close-on-drop logic has an obvious home.
    fn drop(&mut self) {}
}

/// Host-side backing object for the WIT `kv/iterator` resource.
pub struct FunctionStreamIteratorHandle {
    // NOTE(review): element type reconstructed (stripped in the corrupted
    // diff) — confirm the actual iterator trait name.
    pub state_iterator: Box<dyn StateIterator>,
}

/// Per-instance host state threaded through every WIT host call.
pub struct HostState {
    /// WASI context (stdio/env/args inherited from the host process).
    pub wasi: WasiCtx,
    /// Resource table holding live kv store/iterator handles.
    pub table: ResourceTable,
    /// Factory used to create state stores on guest request.
    // NOTE(review): concrete type stripped by the corrupted diff — confirm.
    pub factory: Arc<dyn StateStoreFactory>,
    /// Output sinks addressed by `collector::emit(target_id, ..)`;
    /// `target_id` indexes into this vector.
    pub output_sinks: Vec<Box<dyn OutputSink>>,
}

impl WasiView for HostState {
    fn table(&mut self) -> &mut ResourceTable {
        &mut self.table
    }
    fn ctx(&mut self) -> &mut WasiCtx {
        &mut self.wasi
    }
}

impl kv::Host for HostState {}

impl HostStore for HostState {
    /// Create a new named state store and register it as a guest resource.
    ///
    /// Panics on factory or table failure: a guest that cannot obtain its
    /// state store cannot make progress, so this is treated as fatal.
    fn new(&mut self, name: String) -> Resource<FunctionStreamStoreHandle> {
        let state_store = self
            .factory
            .new_state_store(Some(name.clone()))
            .unwrap_or_else(|e| {
                panic!("Failed to create state store: {}", e);
            });

        let handle = FunctionStreamStoreHandle { name, state_store };
        self.table.push(handle).unwrap_or_else(|e| {
            panic!("Failed to push resource to table: {}", e);
        })
    }

    /// Store `value` under the raw byte `key`.
    fn put_state(
        &mut self,
        self_: Resource<FunctionStreamStoreHandle>,
        key: Vec<u8>,
        value: Vec<u8>,
    ) -> Result<(), Error> {
        let store = self
            .table
            .get(&self_)
            .map_err(|e| Error::Other(format!("Failed to get store resource: {}", e)))?;

        store
            .state_store
            .put_state(key, value)
            .map_err(|e| Error::Other(format!("Failed to put state: {}", e)))
    }

    /// Look up the raw byte `key`; `None` when absent.
    fn get_state(
        &mut self,
        self_: Resource<FunctionStreamStoreHandle>,
        key: Vec<u8>,
    ) -> Result<Option<Vec<u8>>, Error> {
        let store = self
            .table
            .get(&self_)
            .map_err(|e| Error::Other(format!("Failed to get store resource: {}", e)))?;

        store
            .state_store
            .get_state(key)
            .map_err(|e| Error::Other(format!("Failed to get state: {}", e)))
    }

    /// Delete the raw byte `key` (no-op semantics for missing keys are
    /// delegated to the backend).
    fn delete_state(
        &mut self,
        self_: Resource<FunctionStreamStoreHandle>,
        key: Vec<u8>,
    ) -> Result<(), Error> {
        let store = self
            .table
            .get(&self_)
            .map_err(|e| Error::Other(format!("Failed to get store resource: {}", e)))?;

        store
            .state_store
            .delete_state(key)
            .map_err(|e| Error::Other(format!("Failed to delete state: {}", e)))
    }

    /// List keys in the half-open range [`start`, `end`).
    fn list_states(
        &mut self,
        self_: Resource<FunctionStreamStoreHandle>,
        start: Vec<u8>,
        end: Vec<u8>,
    ) -> Result<Vec<Vec<u8>>, Error> {
        let store = self
            .table
            .get(&self_)
            .map_err(|e| Error::Other(format!("Failed to get store resource: {}", e)))?;

        store
            .state_store
            .list_states(start, end)
            .map_err(|e| Error::Other(format!("Failed to list states: {}", e)))
    }

    /// Store `value` under a structured key built from the four `ComplexKey`
    /// components via the shared `key_builder`.
    fn put(
        &mut self,
        self_: Resource<FunctionStreamStoreHandle>,
        key: ComplexKey,
        value: Vec<u8>,
    ) -> Result<(), Error> {
        let store = self
            .table
            .get(&self_)
            .map_err(|e| Error::Other(format!("Failed to get store resource: {}", e)))?;

        let real_key = crate::storage::state_backend::key_builder::build_key(
            &key.key_group,
            &key.key,
            &key.namespace,
            &key.user_key,
        );

        store
            .state_store
            .put_state(real_key, value)
            .map_err(|e| Error::Other(format!("Failed to put: {}", e)))
    }

    /// Look up a structured key; `None` when absent.
    fn get(
        &mut self,
        self_: Resource<FunctionStreamStoreHandle>,
        key: ComplexKey,
    ) -> Result<Option<Vec<u8>>, Error> {
        let store = self
            .table
            .get(&self_)
            .map_err(|e| Error::Other(format!("Failed to get store resource: {}", e)))?;

        let real_key = crate::storage::state_backend::key_builder::build_key(
            &key.key_group,
            &key.key,
            &key.namespace,
            &key.user_key,
        );

        store
            .state_store
            .get_state(real_key)
            .map_err(|e| Error::Other(format!("Failed to get: {}", e)))
    }

    /// Delete a structured key.
    fn delete(
        &mut self,
        self_: Resource<FunctionStreamStoreHandle>,
        key: ComplexKey,
    ) -> Result<(), Error> {
        let store = self
            .table
            .get(&self_)
            .map_err(|e| Error::Other(format!("Failed to get store resource: {}", e)))?;

        let real_key = crate::storage::state_backend::key_builder::build_key(
            &key.key_group,
            &key.key,
            &key.namespace,
            &key.user_key,
        );

        store
            .state_store
            .delete_state(real_key)
            .map_err(|e| Error::Other(format!("Failed to delete: {}", e)))?;
        Ok(())
    }

    /// Merge is currently implemented as a plain overwrite.
    // NOTE(review): last-write-wins rather than a true backend merge; callers
    // relying on RocksDB-style merge operators should be aware.
    fn merge(
        &mut self,
        self_: Resource<FunctionStreamStoreHandle>,
        key: ComplexKey,
        value: Vec<u8>,
    ) -> Result<(), Error> {
        self.put(self_, key, value)
    }

    /// Delete every key sharing the (key-group, key, namespace) prefix;
    /// `key.user_key` is ignored.
    fn delete_prefix(
        &mut self,
        self_: Resource<FunctionStreamStoreHandle>,
        key: ComplexKey,
    ) -> Result<(), Error> {
        let store = self
            .table
            .get(&self_)
            .map_err(|e| Error::Other(format!("Failed to get store resource: {}", e)))?;

        let prefix_key = crate::storage::state_backend::key_builder::build_key(
            &key.key_group,
            &key.key,
            &key.namespace,
            &[],
        );

        // Upper bound for the range scan: prefix followed by 256 0xFF bytes.
        // This assumes no user-key suffix exceeds 256 bytes of 0xFF —
        // TODO(review): compute the true lexicographic successor instead.
        let keys_to_delete = store
            .state_store
            .list_states(prefix_key.clone(), {
                let mut end_prefix = prefix_key.clone();
                end_prefix.extend_from_slice(&[0xFF; 256]);
                end_prefix
            })
            .map_err(|e| Error::Other(format!("Failed to list keys for delete_prefix: {}", e)))?;

        for key_to_delete in keys_to_delete {
            store.state_store.delete_state(key_to_delete).map_err(|e| {
                Error::Other(format!("Failed to delete key in delete_prefix: {}", e))
            })?;
        }

        Ok(())
    }

    /// List keys whose user-key falls in [`start_inclusive`, `end_exclusive`)
    /// under the given (key-group, key, namespace).
    fn list_complex(
        &mut self,
        self_: Resource<FunctionStreamStoreHandle>,
        key_group: Vec<u8>,
        key: Vec<u8>,
        namespace: Vec<u8>,
        start_inclusive: Vec<u8>,
        end_exclusive: Vec<u8>,
    ) -> Result<Vec<Vec<u8>>, Error> {
        let store = self
            .table
            .get(&self_)
            .map_err(|e| Error::Other(format!("Failed to get store resource: {}", e)))?;

        let start_key = crate::storage::state_backend::key_builder::build_key(
            &key_group,
            &key,
            &namespace,
            &start_inclusive,
        );
        let end_key = crate::storage::state_backend::key_builder::build_key(
            &key_group,
            &key,
            &namespace,
            &end_exclusive,
        );

        store
            .state_store
            .list_states(start_key, end_key)
            .map_err(|e| Error::Other(format!("Failed to list_complex: {}", e)))
    }

    /// Start a backend scan over the (key-group, key, namespace) prefix and
    /// hand the iterator to the guest as a resource.
    fn scan_complex(
        &mut self,
        self_: Resource<FunctionStreamStoreHandle>,
        key_group: Vec<u8>,
        key: Vec<u8>,
        namespace: Vec<u8>,
    ) -> Result<Resource<FunctionStreamIteratorHandle>, Error> {
        let store = self
            .table
            .get(&self_)
            .map_err(|e| Error::Other(format!("Failed to get store resource: {}", e)))?;

        let state_iterator = store
            .state_store
            .scan_complex(key_group, key, namespace)
            .map_err(|e| Error::Other(format!("Failed to scan_complex: {}", e)))?;

        let iter = FunctionStreamIteratorHandle { state_iterator };
        self.table
            .push(iter)
            .map_err(|e| Error::Other(format!("Failed to push iterator resource: {}", e)))
    }

    /// Release the store resource when the guest drops it.
    fn drop(&mut self, rep: Resource<FunctionStreamStoreHandle>) -> Result<(), anyhow::Error> {
        self.table
            .delete(rep)
            .map_err(|e| anyhow::anyhow!("Failed to delete store resource: {}", e))?;
        Ok(())
    }
}

impl HostIterator for HostState {
    /// True while the backend iterator has more entries.
    fn has_next(
        &mut self,
        self_: Resource<FunctionStreamIteratorHandle>,
    ) -> Result<bool, Error> {
        let iter = self
            .table
            .get_mut(&self_)
            .map_err(|e| Error::Other(format!("Failed to get iterator resource: {}", e)))?;

        iter.state_iterator
            .has_next()
            .map_err(|e| Error::Other(format!("Failed to check has_next: {}", e)))
    }

    /// Advance and return the next (key, value) pair, or `None` at the end.
    fn next(
        &mut self,
        self_: Resource<FunctionStreamIteratorHandle>,
    ) -> Result<Option<(Vec<u8>, Vec<u8>)>, Error> {
        let iter = self
            .table
            .get_mut(&self_)
            .map_err(|e| Error::Other(format!("Failed to get iterator resource: {}", e)))?;

        iter.state_iterator
            .next()
            .map_err(|e| Error::Other(format!("Failed to get next: {}", e)))
    }

    /// Release the iterator resource when the guest drops it.
    fn drop(&mut self, rep: Resource<FunctionStreamIteratorHandle>) -> Result<(), anyhow::Error> {
        self.table
            .delete(rep)
            .map_err(|e| anyhow::anyhow!("Failed to delete iterator resource: {}", e))?;
        Ok(())
    }
}

impl functionstream::core::collector::Host for HostState {
    /// Route `data` to the sink at index `target_id`.
    ///
    /// Panics on an unknown `target_id` or a failing sink: the guest has no
    /// way to recover from a broken output path.
    fn emit(&mut self, target_id: u32, data: Vec<u8>) {
        let sink_count = self.output_sinks.len();
        let sink = self
            .output_sinks
            .get_mut(target_id as usize)
            .unwrap_or_else(|| {
                panic!("Invalid target_id: {target_id}, available sinks: {sink_count}");
            });

        let buffer_or_event =
            BufferOrEvent::new_buffer(data, Some(format!("target_{}", target_id)), false, false);

        sink.collect(buffer_or_event).unwrap_or_else(|e| {
            panic!("failed to collect output: {e}");
        });
    }

    /// Emit a watermark to the sink at index `target_id`.
    fn emit_watermark(&mut self, target_id: u32, ts: u64) {
        let sink_count = self.output_sinks.len();
        let sink = self
            .output_sinks
            .get_mut(target_id as usize)
            .unwrap_or_else(|| {
                panic!("Invalid target_id: {target_id}, available sinks: {sink_count}");
            });

        // Watermark payload layout: [target_id: u32 LE][ts: u64 LE] = 12 bytes.
        let mut watermark_data = Vec::with_capacity(12);
        watermark_data.extend_from_slice(&target_id.to_le_bytes());
        watermark_data.extend_from_slice(&ts.to_le_bytes());

        let buffer_or_event = BufferOrEvent::new_buffer(
            watermark_data,
            Some(format!("watermark_target_{}", target_id)),
            false,
            false,
        );

        sink.collect(buffer_or_event).unwrap_or_else(|e| {
            panic!("failed to collect watermark: {e}");
        });
    }
}

/// Instantiate `component` in a fresh `Store` wired up with WASI, the kv
/// interface and the collector interface.
///
/// `task_name` is used to look up the task's `created_at` (falling back to
/// the current UNIX time) so the state-store factory can namespace state per
/// task incarnation.
pub fn create_wasm_host_with_component(
    engine: &Engine,
    component: &Component,
    output_sinks: Vec<Box<dyn OutputSink>>,
    init_context: &crate::runtime::taskexecutor::InitContext,
    task_name: String,
) -> anyhow::Result<(Processor, Store<HostState>)> {
    let mut linker = Linker::new(engine);

    wasmtime_wasi::add_to_linker_sync(&mut linker)
        .map_err(|e| anyhow::anyhow!("Failed to add WASI to linker: {}", e))?;

    functionstream::core::kv::add_to_linker(&mut linker, |s: &mut HostState| s)
        .map_err(|e| anyhow::anyhow!("Failed to add kv interface to linker: {}", e))?;

    functionstream::core::collector::add_to_linker(&mut linker, |s: &mut HostState| s)
        .map_err(|e| anyhow::anyhow!("Failed to add collector interface to linker: {}", e))?;

    let created_at = init_context
        .task_storage
        .load_task(&task_name)
        .ok()
        .map(|info| info.created_at)
        .unwrap_or_else(|| {
            std::time::SystemTime::now()
                .duration_since(std::time::UNIX_EPOCH)
                .unwrap()
                .as_secs()
        });

    let factory = init_context
        .state_storage_server
        .create_factory(task_name.clone(), created_at)
        .map_err(|e| anyhow::anyhow!("Failed to create state store factory: {}", e))?;

    let mut store = Store::new(
        engine,
        HostState {
            wasi: WasiCtxBuilder::new()
                .inherit_stdio()
                .inherit_env()
                .inherit_args()
                .build(),
            table: ResourceTable::new(),
            factory,
            output_sinks,
        },
    );

    let processor = Processor::instantiate(&mut store, component, &linker).map_err(|e| {
        // Surface the first error source inline; instantiation failures are
        // otherwise opaque (e.g. missing import, type mismatch).
        let mut detailed_msg = format!("Failed to instantiate wasm component: {}", e);
        if let Some(source) = e.source() {
            detailed_msg.push_str(&format!(". Source: {}", source));
        }
        anyhow::anyhow!("{}", detailed_msg)
    })?;

    Ok((processor, store))
}

/// Compile `wasm_bytes` with the shared global engine and instantiate it.
pub fn create_wasm_host(
    wasm_bytes: &[u8],
    output_sinks: Vec<Box<dyn OutputSink>>,
    init_context: &crate::runtime::taskexecutor::InitContext,
    task_name: String,
) -> anyhow::Result<(Processor, Store<HostState>)> {
    let engine = get_global_engine(wasm_bytes.len())?;

    let component = Component::from_binary(&engine, wasm_bytes)
        .map_err(|e| anyhow::anyhow!("Failed to parse WebAssembly component: {}", e))?;

    create_wasm_host_with_component(&engine, &component, output_sinks, init_context, task_name)
}
diff --git a/src/runtime/processor/wasm/wasm_processor.rs b/src/runtime/processor/wasm/wasm_processor.rs
new file mode 100644
index 00000000..d9134c33
--- /dev/null
+++ b/src/runtime/processor/wasm/wasm_processor.rs
@@ -0,0 +1,771 @@
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+ +// WasmProcessor implementation +// +// This module provides a concrete implementation of the WasmProcessor trait +// that can load and execute WebAssembly modules. + +use super::wasm_host::{HostState, Processor}; +use super::wasm_processor_trait::WasmProcessor; +use crate::runtime::output::OutputSink; +use std::cell::RefCell; +use std::error::Error; +use std::fmt; +use std::sync::Arc; +use wasmtime::{Engine, Store, component::Component}; + +/// Error types for WasmProcessor +#[derive(Debug)] +pub enum WasmProcessorError { + /// Failed to load wasm module + LoadError(String), + /// Failed to initialize wasm module + InitError(String), + /// Failed to execute wasm function + ExecutionError(String), + /// wasm module not found + ModuleNotFound(String), + /// Invalid wasm module + InvalidModule(String), +} + +impl fmt::Display for WasmProcessorError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + WasmProcessorError::LoadError(msg) => write!(f, "Failed to load wasm module: {}", msg), + WasmProcessorError::InitError(msg) => { + write!(f, "Failed to initialize wasm module: {}", msg) + } + WasmProcessorError::ExecutionError(msg) => { + write!(f, "Failed to execute wasm function: {}", msg) + } + WasmProcessorError::ModuleNotFound(path) => { + write!(f, "wasm module not found: {}", path) + } + WasmProcessorError::InvalidModule(msg) => write!(f, "Invalid wasm module: {}", msg), + } + } +} + +impl Error for WasmProcessorError {} + +pub struct WasmProcessorImpl { + modules: Vec<(String, Vec)>, + name: String, + init_config: std::collections::HashMap, + initialized: bool, + current_watermark: Option, + last_checkpoint_id: Option, + is_healthy: bool, + error_count: u32, + processor: RefCell>, + store: RefCell>>, + custom_engine: Option>, + custom_component: Option, + use_custom_engine_and_component: bool, +} + +// Since there is only one thread, we can safely implement Send + Sync +// Note: This requires WasmProcessorImpl to only be used in 
single-threaded environments +unsafe impl Send for WasmProcessorImpl {} +unsafe impl Sync for WasmProcessorImpl {} + +impl WasmProcessorImpl { + pub fn new( + name: String, + module_bytes: Vec, + init_config: std::collections::HashMap, + ) -> Self { + Self { + name, + modules: vec![(String::new(), module_bytes)], + init_config, + initialized: false, + current_watermark: None, + last_checkpoint_id: None, + is_healthy: true, + error_count: 0, + processor: RefCell::new(None), + store: RefCell::new(None), + custom_engine: None, + custom_component: None, + use_custom_engine_and_component: false, + } + } + + pub fn new_with_custom_engine_and_component( + name: String, + modules: &[(String, Vec)], + init_config: std::collections::HashMap, + custom_engine: Arc, + custom_component: Component, + ) -> Self { + Self { + name, + modules: modules.to_vec(), + init_config, + initialized: false, + current_watermark: None, + last_checkpoint_id: None, + is_healthy: true, + error_count: 0, + processor: RefCell::new(None), + store: RefCell::new(None), + custom_engine: Some(custom_engine), + custom_component: Some(custom_component), + use_custom_engine_and_component: true, + } + } + + /// Get the processor name + pub fn name(&self) -> &str { + &self.name + } + + /// Execute Python function by calling fs_exec + /// + /// # Arguments + /// * `class_name` - Name of the Python class to load + /// * `modules` - List of modules (name, bytes) to load + /// + /// # Returns + /// Ok(()) if execution succeeds, or an error if it fails + pub fn exec_python_function( + &self, + class_name: &str, + modules: &[(String, Vec)], + ) -> Result<(), Box> { + if !self.initialized { + return Err(Box::new(WasmProcessorError::InitError( + "Processor not initialized. Call init_with_context() first.".to_string(), + ))); + } + + let processor_ref = self.processor.borrow(); + let processor = processor_ref + .as_ref() + .ok_or_else(|| -> Box { + Box::new(WasmProcessorError::InitError( + "WasmHost not initialized. 
Call init_wasm_host() first.".to_string(), + )) + })?; + + let mut store_ref = self.store.borrow_mut(); + let store = store_ref.as_mut().ok_or_else(|| -> Box { + Box::new(WasmProcessorError::InitError( + "WasmHost not initialized. Call init_wasm_host() first.".to_string(), + )) + })?; + + log::info!( + "Calling fs_exec: class_name='{}', modules={}", + class_name, + modules.len() + ); + + processor.call_fs_exec(store, class_name, modules).map_err( + |e| -> Box { + Box::new(WasmProcessorError::ExecutionError(format!( + "Failed to call fs_exec with class_name '{}': {}", + class_name, e + ))) + }, + )?; + + log::info!("fs_exec completed successfully for class '{}'", class_name); + Ok(()) + } +} + +impl WasmProcessor for WasmProcessorImpl { + fn init_with_context( + &mut self, + _init_context: &crate::runtime::taskexecutor::InitContext, + ) -> Result<(), Box> { + if self.initialized { + log::warn!("WasmProcessor '{}' already initialized", self.name); + return Ok(()); + } + + // Note: WasmHost initialization requires output_sinks + // But sinks are not ready yet, so WasmHost will be initialized later via init_wasm_host + // Here we only do basic initialization checks + + self.initialized = true; + self.is_healthy = true; + self.error_count = 0; + Ok(()) + } + + /// Process input data using the wasm module + /// + /// # Arguments + /// * `data` - Input data as bytes + /// * `input_index` - Index of the input source (0-based) + /// + /// # Note + /// The actual processed data is sent via collector::emit in wasm + fn process(&self, data: Vec, input_index: usize) -> Result<(), Box> { + if !self.initialized { + return Err(Box::new(WasmProcessorError::InitError( + "Processor not initialized. 
Call init_with_context() first.".to_string(), + ))); + } + + // Get mutable references to processor and store + let processor_ref = self.processor.borrow(); + let processor = processor_ref + .as_ref() + .ok_or_else(|| -> Box { + Box::new(WasmProcessorError::InitError( + "WasmHost not initialized. Call init_wasm_host() first.".to_string(), + )) + })?; + + let mut store_ref = self.store.borrow_mut(); + let store = store_ref.as_mut().ok_or_else(|| -> Box { + Box::new(WasmProcessorError::InitError( + "WasmHost not initialized. Call init_wasm_host() first.".to_string(), + )) + })?; + + // Call wasm process function + // WIT: export fs-process: func(source-id: u32, data: list); + let payload_str = String::from_utf8_lossy(&data); + log::info!( + "Calling fs_process: input_index={}, data_len={}, payload={}", + input_index, + data.len(), + payload_str + ); + + let start = std::time::Instant::now(); + processor + .call_fs_process(store, input_index as u32, &data) + .map_err(|e| -> Box { + Box::new(WasmProcessorError::ExecutionError(format!( + "Failed to call wasm process: {}", + e + ))) + })?; + let elapsed_us = start.elapsed().as_micros(); + log::info!( + "fs_process completed: input_index={}, elapsed={}us", + input_index, + elapsed_us + ); + + log::debug!( + "WasmProcessor '{}' processed {} bytes from input {}", + self.name, + data.len(), + input_index + ); + + Ok(()) + } + + /// Process watermark + /// + /// # Arguments + /// * `timestamp` - Watermark timestamp + /// * `input_index` - Index of the input source that generated the watermark (0-based) + fn process_watermark( + &mut self, + timestamp: u64, + input_index: usize, + ) -> Result<(), Box> { + if !self.initialized { + return Err(Box::new(WasmProcessorError::InitError( + "Processor not initialized. 
Call init_with_context() first.".to_string(), + ))); + } + + // Get mutable references to processor and store + let processor_ref = self.processor.borrow(); + let processor = processor_ref + .as_ref() + .ok_or_else(|| -> Box { + Box::new(WasmProcessorError::InitError( + "WasmHost not initialized. Call init_wasm_host() first.".to_string(), + )) + })?; + + let mut store_ref = self.store.borrow_mut(); + let store = store_ref.as_mut().ok_or_else(|| -> Box { + Box::new(WasmProcessorError::InitError( + "WasmHost not initialized. Call init_wasm_host() first.".to_string(), + )) + })?; + + // Call wasm process_watermark function + // WIT: export fs-process-watermark: func(source-id: u32, watermark: u64); + processor + .call_fs_process_watermark(store, input_index as u32, timestamp) + .map_err(|e| -> Box { + Box::new(WasmProcessorError::ExecutionError(format!( + "Failed to call wasm process_watermark: {}", + e + ))) + })?; + + // Update current watermark + if self.current_watermark.is_none() || timestamp > self.current_watermark.unwrap() { + self.current_watermark = Some(timestamp); + log::debug!( + "WasmProcessor '{}' processed watermark: {} from input {}", + self.name, + timestamp, + input_index + ); + } else { + log::warn!( + "WasmProcessor '{}' received watermark {} from input {} which is not greater than current {}", + self.name, + timestamp, + input_index, + self.current_watermark.unwrap() + ); + } + + Ok(()) + } + + /// Take a checkpoint + fn take_checkpoint(&mut self, checkpoint_id: u64) -> Result<(), Box> { + if !self.initialized { + return Err(Box::new(WasmProcessorError::InitError( + "Processor not initialized. 
Call init_with_context() first.".to_string(), + ))); + } + + log::info!( + "WasmProcessor '{}' taking checkpoint: {}", + self.name, + checkpoint_id + ); + + // Get mutable references to processor and store + let processor_ref = self.processor.borrow(); + let processor = processor_ref + .as_ref() + .ok_or_else(|| -> Box { + Box::new(WasmProcessorError::InitError( + "WasmHost not initialized. Call init_wasm_host() first.".to_string(), + )) + })?; + + let mut store_ref = self.store.borrow_mut(); + let store = store_ref.as_mut().ok_or_else(|| -> Box { + Box::new(WasmProcessorError::InitError( + "WasmHost not initialized. Call init_wasm_host() first.".to_string(), + )) + })?; + + // Call wasm take_checkpoint function + // WIT: export fs-take-checkpoint: func(checkpoint-id: u64) -> list; + processor + .call_fs_take_checkpoint(store, checkpoint_id) + .map_err(|e| -> Box { + Box::new(WasmProcessorError::ExecutionError(format!( + "Failed to call wasm take_checkpoint: {}", + e + ))) + })?; + + log::debug!( + "WasmProcessor '{}' checkpoint {} created", + self.name, + checkpoint_id + ); + + // Store checkpoint metadata + self.last_checkpoint_id = Some(checkpoint_id); + + // TODO: Persist checkpoint_data to storage + // For now, only log, actual persistence logic should be handled by the caller + + Ok(()) + } + + /// Finish a checkpoint + fn finish_checkpoint(&mut self, checkpoint_id: u64) -> Result<(), Box> { + if !self.initialized { + return Err(Box::new(WasmProcessorError::InitError( + "Processor not initialized. Call init_with_context() first.".to_string(), + ))); + } + + if self.last_checkpoint_id != Some(checkpoint_id) { + return Err(Box::new(WasmProcessorError::ExecutionError(format!( + "Checkpoint ID mismatch: expected {}, got {}", + self.last_checkpoint_id.unwrap_or(0), + checkpoint_id + )))); + } + + log::info!( + "WasmProcessor '{}' finishing checkpoint: {}", + self.name, + checkpoint_id + ); + + // TODO: In a real implementation, you would: + // 1. 
Finalize the checkpoint + // 2. Commit checkpoint data to storage + // 3. Clean up temporary checkpoint files + + Ok(()) + } + + /// Restore state from checkpoint + fn restore_state(&mut self, checkpoint_id: u64) -> Result<(), Box> { + log::info!( + "WasmProcessor '{}' restoring state from checkpoint: {}", + self.name, + checkpoint_id + ); + + // TODO: In a real implementation, you would: + // 1. Load checkpoint data from storage + // 2. Restore wasm module state + // 3. Restore internal state (watermark, buffers, etc.) + // 4. Reinitialize the processor with restored state + + self.last_checkpoint_id = Some(checkpoint_id); + self.is_healthy = true; + self.error_count = 0; + + Ok(()) + } + + /// Check if the processor is healthy + fn is_healthy(&self) -> bool { + if !self.initialized { + return false; + } + + // Check if error count exceeds threshold + if self.error_count > 10 { + return false; + } + + self.is_healthy + } + + /// Close the wasm processor and clean up resources + /// + /// This method should: + /// 1. Clean up any wasm module instances + /// 2. Release any allocated resources + /// 3. Finalize any pending checkpoints + /// + /// # Returns + /// Ok(()) if cleanup succeeds, or an error if it fails + fn close(&mut self) -> Result<(), Box> { + if !self.initialized { + log::warn!( + "WasmProcessor '{}' not initialized, nothing to close", + self.name + ); + return Ok(()); + } + + log::info!("Closing WasmProcessor '{}'", self.name); + + // TODO: Implement actual cleanup + // In a real implementation, you would: + // 1. Drop wasm module instances + // 2. Release any allocated memory + // 3. Close any open file handles + // 4. Finalize any pending checkpoints + // 5. 
Clean up watermark state + + // Reset state + self.initialized = false; + self.is_healthy = false; + self.current_watermark = None; + self.error_count = 0; + + log::info!("WasmProcessor '{}' closed successfully", self.name); + Ok(()) + } + + fn init_wasm_host( + &mut self, + output_sinks: Vec>, + init_context: &crate::runtime::taskexecutor::InitContext, + task_name: String, + ) -> Result<(), Box> { + use super::wasm_host::create_wasm_host; + + if self.processor.borrow().is_some() || self.store.borrow().is_some() { + log::warn!("WasmHost for processor '{}' already initialized", self.name); + return Ok(()); + } + + let (processor, store) = if self.use_custom_engine_and_component { + let engine = self.custom_engine.as_ref().ok_or_else(|| { + Box::new(WasmProcessorError::InitError( + "use_custom_engine_and_component is true but custom_engine is None".to_string(), + )) as Box + })?; + let component = self.custom_component.as_ref().ok_or_else(|| { + Box::new(WasmProcessorError::InitError( + "use_custom_engine_and_component is true but custom_component is None" + .to_string(), + )) as Box + })?; + use super::wasm_host::create_wasm_host_with_component; + create_wasm_host_with_component( + engine, + component, + output_sinks, + init_context, + task_name, + ) + .map_err(|e| -> Box { + let error_msg = format!( + "Failed to create WasmHost with custom engine/component: {}", + e + ); + log::error!("{}", error_msg); + let mut full_error = error_msg.clone(); + let mut source = e.source(); + let mut depth = 0; + while let Some(err) = source { + depth += 1; + full_error.push_str(&format!("\n Caused by ({}): {}", depth, err)); + source = err.source(); + if depth > 10 { + full_error.push_str("\n ... (error chain too long, truncated)"); + break; + } + } + log::error!("Full error chain:\n{}", full_error); + Box::new(WasmProcessorError::InitError(full_error)) + })? 
+ } else { + let first_bytes = self + .modules + .first() + .map(|(_, b)| b.as_slice()) + .unwrap_or(&[]); + create_wasm_host(first_bytes, output_sinks, init_context, task_name).map_err( + |e| -> Box { + let error_msg = format!("Failed to create WasmHost: {}", e); + log::error!("{}", error_msg); + let mut full_error = error_msg.clone(); + let mut source = e.source(); + let mut depth = 0; + while let Some(err) = source { + depth += 1; + full_error.push_str(&format!("\n Caused by ({}): {}", depth, err)); + source = err.source(); + if depth > 10 { + full_error.push_str("\n ... (error chain too long, truncated)"); + break; + } + } + log::error!("Full error chain:\n{}", full_error); + Box::new(WasmProcessorError::InitError(full_error)) + }, + )? + }; + + *self.processor.borrow_mut() = Some(processor); + *self.store.borrow_mut() = Some(store); + + let config_list: Vec<(String, String)> = self + .init_config + .iter() + .map(|(k, v)| (k.clone(), v.clone())) + .collect(); + + { + let processor_ref = self.processor.borrow(); + let processor = processor_ref.as_ref().unwrap(); + + if self.use_custom_engine_and_component { + let mut store_ref = self.store.borrow_mut(); + let store = store_ref.as_mut().unwrap(); + + let class_name = self + .init_config + .get("class_name") + .or_else(|| self.init_config.get("processor_class")) + .map(|s| s.clone()) + .unwrap_or_else(|| self.name.clone()); + + processor + .call_fs_exec(store, &class_name, &self.modules) + .map_err(|e| -> Box { + Box::new(WasmProcessorError::InitError(format!( + "Failed to call fs_exec with class_name '{}' and modules: {}", + class_name, e + ))) + })?; + } + + let mut store_ref = self.store.borrow_mut(); + let store = store_ref.as_mut().unwrap(); + processor + .call_fs_init(store, &config_list) + .map_err(|e| -> Box { + Box::new(WasmProcessorError::InitError(format!( + "Failed to call fs_init: {}", + e + ))) + })?; + } + + Ok(()) + } + + fn start_sinks(&mut self) -> Result<(), Box> { + let mut store_ref = 
self.store.borrow_mut(); + let store = store_ref.as_mut().ok_or_else(|| -> Box { + Box::new(WasmProcessorError::InitError( + "WasmHost not initialized. Call init_wasm_host() first.".to_string(), + )) + })?; + + let host_state = store.data_mut(); + for (idx, sink) in host_state.output_sinks.iter_mut().enumerate() { + if let Err(e) = sink.start() { + log::error!("Failed to start sink {}: {}", idx, e); + return Err(Box::new(WasmProcessorError::ExecutionError(format!( + "Failed to start sink {}: {}", + idx, e + )))); + } + } + + log::debug!( + "All {} sinks started successfully", + host_state.output_sinks.len() + ); + Ok(()) + } + + /// Stop all output sinks + fn stop_sinks(&mut self) -> Result<(), Box> { + let mut store_ref = self.store.borrow_mut(); + let store = store_ref.as_mut().ok_or_else(|| -> Box { + Box::new(WasmProcessorError::InitError( + "WasmHost not initialized. Call init_wasm_host() first.".to_string(), + )) + })?; + + let host_state = store.data_mut(); + for (idx, sink) in host_state.output_sinks.iter_mut().enumerate() { + if let Err(e) = sink.stop() { + log::warn!("Failed to stop sink {}: {}", idx, e); + // Continue stopping other sinks even if one fails + } + } + + log::debug!("All {} sinks stopped", host_state.output_sinks.len()); + Ok(()) + } + + /// Take checkpoint for all output sinks + fn take_checkpoint_sinks(&mut self, checkpoint_id: u64) -> Result<(), Box> { + let mut store_ref = self.store.borrow_mut(); + let store = store_ref.as_mut().ok_or_else(|| -> Box { + Box::new(WasmProcessorError::InitError( + "WasmHost not initialized. 
Call init_wasm_host() first.".to_string(), + )) + })?; + + let host_state = store.data_mut(); + for (idx, sink) in host_state.output_sinks.iter_mut().enumerate() { + if let Err(e) = sink.take_checkpoint(checkpoint_id) { + log::error!("Failed to checkpoint sink {}: {}", idx, e); + return Err(Box::new(WasmProcessorError::ExecutionError(format!( + "Failed to checkpoint sink {}: {}", + idx, e + )))); + } + } + + log::debug!( + "Checkpoint {} taken for all {} sinks", + checkpoint_id, + host_state.output_sinks.len() + ); + Ok(()) + } + + /// Finish checkpoint for all output sinks + fn finish_checkpoint_sinks(&mut self, checkpoint_id: u64) -> Result<(), Box> { + let mut store_ref = self.store.borrow_mut(); + let store = store_ref.as_mut().ok_or_else(|| -> Box { + Box::new(WasmProcessorError::InitError( + "WasmHost not initialized. Call init_wasm_host() first.".to_string(), + )) + })?; + + let host_state = store.data_mut(); + for (idx, sink) in host_state.output_sinks.iter_mut().enumerate() { + if let Err(e) = sink.finish_checkpoint(checkpoint_id) { + log::error!("Failed to finish checkpoint for sink {}: {}", idx, e); + return Err(Box::new(WasmProcessorError::ExecutionError(format!( + "Failed to finish checkpoint for sink {}: {}", + idx, e + )))); + } + } + + log::debug!( + "Checkpoint {} finished for all {} sinks", + checkpoint_id, + host_state.output_sinks.len() + ); + Ok(()) + } + + /// Close all output sinks + fn close_sinks(&mut self) -> Result<(), Box> { + let mut store_ref = self.store.borrow_mut(); + let store = store_ref.as_mut().ok_or_else(|| -> Box { + Box::new(WasmProcessorError::InitError( + "WasmHost not initialized. 
Call init_wasm_host() first.".to_string(), + )) + })?; + + let host_state = store.data_mut(); + for (idx, sink) in host_state.output_sinks.iter_mut().enumerate() { + if let Err(e) = sink.stop() { + log::warn!("Failed to stop sink {} during close: {}", idx, e); + } + if let Err(e) = sink.close() { + log::warn!("Failed to close sink {}: {}", idx, e); + // Continue closing other sinks even if one fails + } + } + + log::debug!("All {} sinks closed", host_state.output_sinks.len()); + Ok(()) + } + + /// Execute Python function dynamically + fn exec_python_function( + &self, + class_name: &str, + modules: &[(String, Vec)], + ) -> Result<(), Box> { + // Call the WasmProcessorImpl implementation + WasmProcessorImpl::exec_python_function(self, class_name, modules) + } +} diff --git a/src/runtime/processor/wasm/wasm_processor_trait.rs b/src/runtime/processor/wasm/wasm_processor_trait.rs new file mode 100644 index 00000000..ab7820ff --- /dev/null +++ b/src/runtime/processor/wasm/wasm_processor_trait.rs @@ -0,0 +1,278 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// WasmProcessor trait definition +// +// This module defines the WasmProcessor trait, which is the interface +// for WebAssembly-based data processors in the stream processing system. + +use crate::runtime::output::OutputSink; +use crate::runtime::taskexecutor::InitContext; + +/// wasm Processor trait +/// +/// This trait defines the interface for processing data using WebAssembly modules. 
+/// Implementations should load and execute wasm modules to process stream data. +pub trait WasmProcessor: Send + Sync { + /// Process input data + /// + /// # Arguments + /// * `data` - Input data as bytes + /// * `input_index` - Index of the input source (0-based) + /// + /// # Note + /// The actual processed data is sent via collector::emit in wasm + fn process( + &self, + data: Vec, + input_index: usize, + ) -> Result<(), Box>; + + /// Process watermark + /// + /// # Arguments + /// * `timestamp` - Watermark timestamp + /// * `input_index` - Index of the input source that generated the watermark (0-based) + /// + /// # Returns + /// Ok(()) if processing succeeds, or an error if it fails + fn process_watermark( + &mut self, + timestamp: u64, + input_index: usize, + ) -> Result<(), Box> { + // Default implementation: do nothing + log::debug!( + "Processing watermark: {} from input {}", + timestamp, + input_index + ); + Ok(()) + } + + /// Initialize processor with initialization context + /// + /// This method should: + /// 1. Load the wasm module from the file system + /// 2. Validate the module + /// 3. Prepare the module for execution + /// + /// # Arguments + /// - `init_context`: Initialization context containing state storage, task storage and other resources + /// + /// # Returns + /// Ok(()) if initialization succeeds, or an error if it fails + fn init_with_context( + &mut self, + init_context: &InitContext, + ) -> Result<(), Box>; + + /// Initialize WasmHost with output sinks + /// + /// This method should be called after init_with_context to initialize WasmHost with output sinks. 
+ /// + /// # Arguments + /// - `output_sinks`: Output sink list + /// - `init_context`: Initialization context + /// - `task_name`: Task name + /// + /// # Returns + /// Ok(()) if initialization succeeds, or an error if it fails + fn init_wasm_host( + &mut self, + _output_sinks: Vec>, + _init_context: &InitContext, + _task_name: String, + ) -> Result<(), Box> { + Ok(()) + } + + /// Take a checkpoint + /// + /// This method should: + /// 1. Save the current state of the wasm module + /// 2. Save any internal state (watermark, buffers, etc.) + /// 3. Persist the checkpoint to storage + /// + /// # Arguments + /// * `checkpoint_id` - Unique identifier for this checkpoint + /// + /// # Returns + /// Ok(()) if checkpoint succeeds, or an error if it fails + fn take_checkpoint( + &mut self, + checkpoint_id: u64, + ) -> Result<(), Box> { + // Default implementation: do nothing + log::debug!("Taking checkpoint: {}", checkpoint_id); + Ok(()) + } + + /// Finish a checkpoint + /// + /// This method should: + /// 1. Finalize the checkpoint + /// 2. Commit checkpoint data to storage + /// 3. Clean up temporary checkpoint files + /// + /// # Arguments + /// * `checkpoint_id` - Unique identifier for this checkpoint + /// + /// # Returns + /// Ok(()) if checkpoint finish succeeds, or an error if it fails + fn finish_checkpoint( + &mut self, + checkpoint_id: u64, + ) -> Result<(), Box> { + // Default implementation: do nothing + log::debug!("Finishing checkpoint: {}", checkpoint_id); + Ok(()) + } + + /// Restore state from checkpoint + /// + /// This method should: + /// 1. Load checkpoint data from storage + /// 2. Restore wasm module state + /// 3. Restore internal state (watermark, buffers, etc.) + /// 4. 
Reinitialize the processor with restored state + /// + /// # Arguments + /// * `checkpoint_id` - Unique identifier for the checkpoint to restore from + /// + /// # Returns + /// Ok(()) if restore succeeds, or an error if it fails + fn restore_state( + &mut self, + checkpoint_id: u64, + ) -> Result<(), Box> { + // Default implementation: do nothing + log::debug!("Restoring state from checkpoint: {}", checkpoint_id); + Ok(()) + } + + /// Check if the processor is healthy + /// + /// This method should check the health status of the processor, + /// including whether it's initialized, if there are any errors, + /// and if the wasm module is functioning correctly. + /// + /// # Returns + /// `true` if the processor is healthy, `false` otherwise + fn is_healthy(&self) -> bool { + // Default implementation: always healthy + true + } + + /// Close the processor and clean up resources + /// + /// This method should: + /// 1. Clean up any wasm module instances + /// 2. Release any allocated resources + /// 3. Finalize any pending checkpoints + /// + /// # Returns + /// Ok(()) if cleanup succeeds, or an error if it fails + fn close(&mut self) -> Result<(), Box> { + Ok(()) + } + + /// Start all output sinks + /// + /// This method should start all output sinks managed by the processor. + /// + /// # Returns + /// Ok(()) if all sinks start successfully, or an error if any sink fails + fn start_sinks(&mut self) -> Result<(), Box> { + // Default implementation: do nothing + Ok(()) + } + + /// Stop all output sinks + /// + /// This method should stop all output sinks managed by the processor. + /// + /// # Returns + /// Ok(()) if all sinks stop successfully, or an error if any sink fails + fn stop_sinks(&mut self) -> Result<(), Box> { + // Default implementation: do nothing + Ok(()) + } + + /// Take checkpoint for all output sinks + /// + /// This method should trigger checkpoint for all output sinks. 
+ /// + /// # Arguments + /// * `checkpoint_id` - Unique identifier for this checkpoint + /// + /// # Returns + /// Ok(()) if all sinks checkpoint successfully, or an error if any sink fails + fn take_checkpoint_sinks( + &mut self, + checkpoint_id: u64, + ) -> Result<(), Box> { + // Default implementation: do nothing + log::debug!("Taking checkpoint for sinks: {}", checkpoint_id); + Ok(()) + } + + /// Finish checkpoint for all output sinks + /// + /// This method should finish checkpoint for all output sinks. + /// + /// # Arguments + /// * `checkpoint_id` - Unique identifier for this checkpoint + /// + /// # Returns + /// Ok(()) if all sinks finish checkpoint successfully, or an error if any sink fails + fn finish_checkpoint_sinks( + &mut self, + checkpoint_id: u64, + ) -> Result<(), Box> { + // Default implementation: do nothing + log::debug!("Finishing checkpoint for sinks: {}", checkpoint_id); + Ok(()) + } + + /// Close all output sinks + /// + /// This method should close all output sinks managed by the processor. + /// + /// # Returns + /// Ok(()) if all sinks close successfully, or an error if any sink fails + fn close_sinks(&mut self) -> Result<(), Box> { + // Default implementation: do nothing + Ok(()) + } + + /// Execute Python function dynamically + /// + /// This method calls fs_exec to load and execute Python code dynamically. 
+ /// + /// # Arguments + /// * `class_name` - Name of the Python class to load + /// * `modules` - List of modules (name, bytes) to load + /// + /// # Returns + /// Ok(()) if execution succeeds, or an error if it fails + fn exec_python_function( + &self, + _class_name: &str, + _modules: &[(String, Vec)], + ) -> Result<(), Box> { + // Default implementation: not supported + Err(Box::new(std::io::Error::other( + "exec_python_function not supported by this processor", + ))) + } +} diff --git a/src/runtime/processor/wasm/wasm_task.rs b/src/runtime/processor/wasm/wasm_task.rs new file mode 100644 index 00000000..83fc05df --- /dev/null +++ b/src/runtime/processor/wasm/wasm_task.rs @@ -0,0 +1,919 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use super::thread_pool::ThreadGroup; +use super::wasm_processor_trait::WasmProcessor; +use crate::runtime::buffer_and_event::BufferOrEvent; +use crate::runtime::common::{ComponentState, TaskCompletionFlag}; +use crate::runtime::input::InputSource; +use crate::runtime::output::OutputSink; +use crate::runtime::task::TaskLifecycle; +use crate::storage::task::FunctionInfo; +use crossbeam_channel::{Receiver, Sender, bounded}; +use std::sync::atomic::{AtomicBool, AtomicU8, Ordering}; +use std::sync::mpsc; +use std::sync::{Arc, Mutex}; +use std::thread::{self, JoinHandle}; +use std::time::Duration; + +const CONTROL_OPERATION_TIMEOUT_MS: u64 = 5000; +const CONTROL_OPERATION_MAX_RETRIES: u32 = 3; +const MAX_BATCH_SIZE: usize = 100; + +enum TaskControlSignal { + Start { + completion_flag: TaskCompletionFlag, + }, + Stop { + completion_flag: TaskCompletionFlag, + }, + Cancel { + completion_flag: TaskCompletionFlag, + }, + Checkpoint { + checkpoint_id: u64, + completion_flag: TaskCompletionFlag, + }, + CheckpointFinish { + checkpoint_id: u64, + completion_flag: TaskCompletionFlag, + }, + ExecPythonFunction { + class_name: String, + modules: Vec<(String, Vec)>, + completion_flag: TaskCompletionFlag, + }, + Close { + completion_flag: TaskCompletionFlag, + }, +} + +enum ControlAction { + Continue, + Pause, + Exit, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +enum TaskState { + Uninitialized, + Initialized, + Running, + Stopped, + Checkpointing, + Closing, + Closed, + Failed, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum ExecutionState { + Created, + Deploying, + Initializing, + Running, + Finished, + Canceling, + Canceled, + Failed, +} + +impl ExecutionState { + fn from_u8(value: u8) -> Self { + match value { + 0 => ExecutionState::Created, + 1 => ExecutionState::Deploying, + 2 => ExecutionState::Initializing, + 3 => ExecutionState::Running, + 4 => ExecutionState::Finished, + 5 => ExecutionState::Canceling, + 6 => ExecutionState::Canceled, + 7 => 
ExecutionState::Failed, + _ => ExecutionState::Created, + } + } +} + +pub struct WasmTask { + task_name: String, + task_type: String, + inputs: Option>>, + processor: Option>, + sinks: Option>>, + state: Arc>, + control_sender: Option>, + task_thread: Option>, + thread_groups: Option>, + execution_state: Arc, + failure_cause: Arc>>, + thread_running: Arc, + termination_future: Arc>>>, +} + +impl WasmTask { + pub fn new( + task_name: String, + task_type: String, + inputs: Vec>, + processor: Box, + sinks: Vec>, + ) -> Self { + let (_tx, rx) = mpsc::channel(); + Self { + task_name, + task_type, + inputs: Some(inputs), + processor: Some(processor), + sinks: Some(sinks), + state: Arc::new(Mutex::new(ComponentState::Uninitialized)), + control_sender: None, + task_thread: None, + thread_groups: None, + execution_state: Arc::new(AtomicU8::new(ExecutionState::Created as u8)), + failure_cause: Arc::new(Mutex::new(None)), + thread_running: Arc::new(AtomicBool::new(false)), + termination_future: Arc::new(Mutex::new(Some(rx))), + } + } + + pub fn init_with_context( + &mut self, + init_context: &crate::runtime::taskexecutor::InitContext, + ) -> Result<(), Box> { + let mut inputs = self.inputs.take().ok_or_else(|| { + Box::new(std::io::Error::other("inputs already moved to thread")) + as Box + })?; + let mut processor = self.processor.take().ok_or_else(|| { + Box::new(std::io::Error::other("processor already moved to thread")) + as Box + })?; + let mut sinks = self.sinks.take().ok_or_else(|| { + Box::new(std::io::Error::other("sinks already moved to thread")) + as Box + })?; + + let init_context = init_context.clone(); + + for (idx, sink) in sinks.iter_mut().enumerate() { + if let Err(e) = sink.init_with_context(&init_context) { + log::error!("Failed to init sink {}: {}", idx, e); + return Err(Box::new(std::io::Error::other(format!( + "Failed to init sink {}: {}", + idx, e + )))); + } + } + + if let Err(e) = processor.init_with_context(&init_context) { + log::error!("Failed to 
init processor: {}", e); + return Err(Box::new(std::io::Error::other(format!( + "Failed to init processor: {}", + e + )))); + } + + if let Err(e) = processor.init_wasm_host(sinks, &init_context, self.task_name.clone()) { + log::error!("Failed to init WasmHost: {}", e); + return Err(Box::new(std::io::Error::other(format!( + "Failed to init WasmHost: {}", + e + )))); + } + + for (idx, input) in inputs.iter_mut().enumerate() { + if let Err(e) = input.init_with_context(&init_context) { + log::error!("Failed to init input {}: {}", idx, e); + return Err(Box::new(std::io::Error::other(format!( + "Failed to init input {}: {}", + idx, e + )))); + } + } + + let (control_sender, control_receiver) = bounded(10); + self.control_sender = Some(control_sender); + + let task_name = self.task_name.clone(); + let state = self.state.clone(); + let execution_state = self.execution_state.clone(); + let thread_running = self.thread_running.clone(); + let termination_tx = { + let (_tx, rx) = mpsc::channel(); + *self.termination_future.lock().unwrap() = Some(rx); + _tx + }; + + thread_running.store(true, Ordering::Relaxed); + self.execution_state + .store(ExecutionState::Initializing as u8, Ordering::Relaxed); + + let thread_handle = thread::Builder::new() + .name(format!("stream-task-{}", task_name)) + .spawn(move || { + Self::task_thread_loop(task_name, inputs, processor, control_receiver, state); + + execution_state.store(ExecutionState::Finished as u8, Ordering::Relaxed); + thread_running.store(false, Ordering::Relaxed); + let _ = termination_tx.send(ExecutionState::Finished); + }) + .map_err(|e| { + Box::new(std::io::Error::other(format!( + "Failed to start task thread: {}", + e + ))) as Box + })?; + + use crate::runtime::processor::wasm::thread_pool::{ThreadGroup, ThreadGroupType}; + let mut main_runloop_group = ThreadGroup::new( + ThreadGroupType::MainRunloop, + format!("MainRunloop-{}", self.task_name), + ); + main_runloop_group.add_thread(thread_handle); + 
init_context.register_thread_group(main_runloop_group); + + let thread_groups = init_context.take_thread_groups(); + self.thread_groups = Some(thread_groups); + + self.task_thread = None; + self.execution_state + .store(ExecutionState::Running as u8, Ordering::Relaxed); + + Ok(()) + } + + fn task_thread_loop( + task_name: String, + mut inputs: Vec>, + mut processor: Box, + control_receiver: Receiver, + shared_state: Arc>, + ) { + let thread_start_time = std::time::Instant::now(); + use crossbeam_channel::select; + + let init_start = std::time::Instant::now(); + let mut state = TaskState::Initialized; + let mut current_input_index: usize = 0; + let mut is_running = false; + let init_elapsed = init_start.elapsed().as_secs_f64(); + log::debug!( + "[Timing] task_thread_loop - Initialize local state: {:.3}s", + init_elapsed + ); + + let lock_start = std::time::Instant::now(); + *shared_state.lock().unwrap() = ComponentState::Initialized; + let lock_elapsed = lock_start.elapsed().as_secs_f64(); + log::debug!( + "[Timing] task_thread_loop - Update shared state: {:.3}s", + lock_elapsed + ); + + let thread_init_elapsed = thread_start_time.elapsed().as_secs_f64(); + log::debug!( + "Task thread started (paused): {} (thread init: {:.3}s)", + task_name, + thread_init_elapsed + ); + + loop { + if is_running { + select! 
{ + recv(control_receiver) -> result => { + match result { + Ok(signal) => { + match Self::handle_control_signal( + signal, + &mut state, + &mut inputs, + &mut processor, + &shared_state, + &task_name, + ) { + ControlAction::Continue => is_running = true, + ControlAction::Pause => is_running = false, + ControlAction::Exit => break, + } + } + Err(_) => { + log::warn!("Control channel disconnected: {}", task_name); + break; + } + } + } + default => { + Self::process_batch( + &mut inputs, + &mut processor, + &mut current_input_index, + ); + } + } + } else { + match control_receiver.recv() { + Ok(signal) => { + match Self::handle_control_signal( + signal, + &mut state, + &mut inputs, + &mut processor, + &shared_state, + &task_name, + ) { + ControlAction::Continue => is_running = true, + ControlAction::Pause => is_running = false, + ControlAction::Exit => break, + } + } + Err(_) => { + log::warn!("Control channel disconnected: {}", task_name); + break; + } + } + } + } + + Self::cleanup_resources(&mut inputs, &mut processor, &task_name); + log::info!("Task thread exiting: {}", task_name); + } + + fn handle_control_signal( + signal: TaskControlSignal, + state: &mut TaskState, + inputs: &mut Vec>, + processor: &mut Box, + shared_state: &Arc>, + task_name: &str, + ) -> ControlAction { + match signal { + TaskControlSignal::Start { completion_flag } => { + if *state != TaskState::Initialized && *state != TaskState::Stopped { + let error = format!("Cannot start in state: {:?}", state); + log::error!("{} for task: {}", error, task_name); + completion_flag.mark_error(error); + return ControlAction::Pause; + } + + log::debug!("Starting task: {}", task_name); + + for (idx, input) in inputs.iter_mut().enumerate() { + if let Err(e) = input.start() { + log::error!("Failed to start input {}: {}", idx, e); + } + } + + if let Err(e) = processor.start_sinks() { + log::error!("Failed to start sinks: {}", e); + } + + *state = TaskState::Running; + *shared_state.lock().unwrap() = 
ComponentState::Running; + completion_flag.mark_completed(); + ControlAction::Continue + } + + TaskControlSignal::Stop { completion_flag } => { + log::info!("Stopping task: {}", task_name); + + for (idx, input) in inputs.iter_mut().enumerate() { + if let Err(e) = input.stop() { + log::warn!("Failed to stop input {}: {}", idx, e); + } + } + + if let Err(e) = processor.stop_sinks() { + log::warn!("Failed to stop sinks: {}", e); + } + + *state = TaskState::Stopped; + *shared_state.lock().unwrap() = ComponentState::Stopped; + completion_flag.mark_completed(); + ControlAction::Pause + } + + TaskControlSignal::Cancel { completion_flag } => { + log::info!("Canceling task: {}", task_name); + *state = TaskState::Stopped; + *shared_state.lock().unwrap() = ComponentState::Stopped; + completion_flag.mark_completed(); + ControlAction::Exit + } + + TaskControlSignal::Checkpoint { + checkpoint_id, + completion_flag, + } => { + if *state != TaskState::Running { + let error = format!("Cannot checkpoint in state: {:?}", state); + log::error!("{} for task: {}", error, task_name); + completion_flag.mark_error(error); + return ControlAction::Continue; + } + + log::info!( + "Checkpoint {} started for task: {}", + checkpoint_id, + task_name + ); + *state = TaskState::Checkpointing; + *shared_state.lock().unwrap() = ComponentState::Checkpointing; + + for (idx, input) in inputs.iter_mut().enumerate() { + if let Err(e) = input.take_checkpoint(checkpoint_id) { + log::error!("Failed to checkpoint input {}: {}", idx, e); + } + } + + if let Err(e) = processor.take_checkpoint_sinks(checkpoint_id) { + log::error!("Failed to checkpoint sinks: {}", e); + } + + completion_flag.mark_completed(); + ControlAction::Continue + } + + TaskControlSignal::CheckpointFinish { + checkpoint_id, + completion_flag, + } => { + if *state != TaskState::Checkpointing { + let error = format!("Cannot finish checkpoint in state: {:?}", state); + log::error!("{} for task: {}", error, task_name); + 
completion_flag.mark_error(error); + return ControlAction::Continue; + } + + log::info!( + "Checkpoint {} finished for task: {}", + checkpoint_id, + task_name + ); + + for (idx, input) in inputs.iter_mut().enumerate() { + if let Err(e) = input.finish_checkpoint(checkpoint_id) { + log::error!("Failed to finish checkpoint for input {}: {}", idx, e); + } + } + + if let Err(e) = processor.finish_checkpoint_sinks(checkpoint_id) { + log::error!("Failed to finish checkpoint for sinks: {}", e); + } + + *state = TaskState::Running; + *shared_state.lock().unwrap() = ComponentState::Running; + completion_flag.mark_completed(); + ControlAction::Continue + } + + TaskControlSignal::ExecPythonFunction { + class_name, + modules, + completion_flag, + } => { + log::info!( + "Executing Python function: class_name='{}', modules={} for task: {}", + class_name, + modules.len(), + task_name + ); + + match processor.exec_python_function(&class_name, &modules) { + Ok(()) => { + log::info!("Python function executed successfully: {}", class_name); + completion_flag.mark_completed(); + } + Err(e) => { + let error_msg = format!("Failed to execute Python function: {}", e); + log::error!("{} for task: {}", error_msg, task_name); + completion_flag.mark_error(error_msg); + } + } + + ControlAction::Continue + } + + TaskControlSignal::Close { completion_flag } => { + log::info!("Closing task: {}", task_name); + *state = TaskState::Closing; + *shared_state.lock().unwrap() = ComponentState::Closing; + + *state = TaskState::Closed; + *shared_state.lock().unwrap() = ComponentState::Closed; + completion_flag.mark_completed(); + ControlAction::Exit + } + } + } + + #[inline] + fn process_batch( + inputs: &mut Vec>, + processor: &mut Box, + current_input_index: &mut usize, + ) { + let input_count = inputs.len(); + if input_count == 0 { + return; + } + + let mut batch_count = 0; + + while batch_count < MAX_BATCH_SIZE { + let mut found_data = false; + + for _ in 0..input_count { + let input_idx = 
*current_input_index; + let input = &mut inputs[input_idx]; + *current_input_index = (*current_input_index + 1) % input_count; + + match input.get_next() { + Ok(Some(data)) => { + found_data = true; + Self::process_single_record(data, processor, input_idx); + batch_count += 1; + break; + } + Ok(None) => continue, + Err(e) => { + log::error!("Error reading input: {}", e); + continue; + } + } + } + + if !found_data { + break; + } + } + } + + #[inline] + fn process_single_record( + data: BufferOrEvent, + processor: &mut Box, + input_index: usize, + ) { + if !data.is_buffer() { + return; + } + + if let Some(buffer_bytes) = data.get_buffer() { + if let Err(e) = processor.process(buffer_bytes.to_vec(), input_index) { + log::error!("Processor error from input {}: {}", input_index, e); + } + } + } + + fn cleanup_resources( + inputs: &mut Vec>, + processor: &mut Box, + task_name: &str, + ) { + for (idx, input) in inputs.iter_mut().enumerate() { + if let Err(e) = input.stop() { + log::warn!("Failed to stop input {} for {}: {}", idx, task_name, e); + } + if let Err(e) = input.close() { + log::warn!("Failed to close input {} for {}: {}", idx, task_name, e); + } + } + + if let Err(e) = processor.close_sinks() { + log::warn!("Failed to close sinks for {}: {}", task_name, e); + } + + if let Err(e) = processor.close() { + log::warn!("Failed to close processor for {}: {}", task_name, e); + } + } + + fn wait_with_retry( + &self, + completion_flag: &TaskCompletionFlag, + operation_name: &str, + ) -> Result<(), Box> { + let timeout = Duration::from_millis(CONTROL_OPERATION_TIMEOUT_MS); + + for retry in 0..CONTROL_OPERATION_MAX_RETRIES { + match completion_flag.wait_timeout(timeout) { + Ok(_) => { + if let Some(error) = completion_flag.get_error() { + return Err(Box::new(std::io::Error::other(format!( + "{} failed: {}", + operation_name, error + )))); + } + return Ok(()); + } + Err(_) => { + log::warn!( + "{} timeout (retry {}/{}), task: {}", + operation_name, + retry + 1, + 
CONTROL_OPERATION_MAX_RETRIES, + self.task_name + ); + } + } + } + + Err(Box::new(std::io::Error::new( + std::io::ErrorKind::TimedOut, + format!( + "{} failed after {} retries", + operation_name, CONTROL_OPERATION_MAX_RETRIES + ), + ))) + } + + pub fn start(&self) -> Result<(), Box> { + let completion_flag = TaskCompletionFlag::new(); + if let Some(ref sender) = self.control_sender { + sender + .send(TaskControlSignal::Start { + completion_flag: completion_flag.clone(), + }) + .map_err(|e| { + Box::new(std::io::Error::other(format!( + "Failed to send start signal: {}", + e + ))) as Box + })?; + } + self.wait_with_retry(&completion_flag, "Start") + } + + pub fn stop(&self) -> Result<(), Box> { + let completion_flag = TaskCompletionFlag::new(); + if let Some(ref sender) = self.control_sender { + sender + .send(TaskControlSignal::Stop { + completion_flag: completion_flag.clone(), + }) + .map_err(|e| { + Box::new(std::io::Error::other(format!( + "Failed to send stop signal: {}", + e + ))) as Box + })?; + } + self.wait_with_retry(&completion_flag, "Stop") + } + + pub fn cancel(&self) -> Result<(), Box> { + let completion_flag = TaskCompletionFlag::new(); + if let Some(ref sender) = self.control_sender { + sender + .send(TaskControlSignal::Cancel { + completion_flag: completion_flag.clone(), + }) + .map_err(|e| { + Box::new(std::io::Error::other(format!( + "Failed to send cancel signal: {}", + e + ))) as Box + })?; + } + self.wait_with_retry(&completion_flag, "Cancel") + } + + pub fn take_checkpoint( + &self, + checkpoint_id: u64, + ) -> Result<(), Box> { + let completion_flag = TaskCompletionFlag::new(); + if let Some(ref sender) = self.control_sender { + sender + .send(TaskControlSignal::Checkpoint { + checkpoint_id, + completion_flag: completion_flag.clone(), + }) + .map_err(|e| { + Box::new(std::io::Error::other(format!( + "Failed to send checkpoint signal: {}", + e + ))) as Box + })?; + } + self.wait_with_retry(&completion_flag, "Checkpoint") + } + + pub fn 
finish_checkpoint( + &self, + checkpoint_id: u64, + ) -> Result<(), Box> { + let completion_flag = TaskCompletionFlag::new(); + if let Some(ref sender) = self.control_sender { + sender + .send(TaskControlSignal::CheckpointFinish { + checkpoint_id, + completion_flag: completion_flag.clone(), + }) + .map_err(|e| { + Box::new(std::io::Error::other(format!( + "Failed to send checkpoint finish signal: {}", + e + ))) as Box + })?; + } + self.wait_with_retry(&completion_flag, "CheckpointFinish") + } + + pub fn exec_python_function( + &self, + class_name: String, + modules: Vec<(String, Vec)>, + ) -> Result<(), Box> { + let completion_flag = TaskCompletionFlag::new(); + if let Some(ref sender) = self.control_sender { + sender + .send(TaskControlSignal::ExecPythonFunction { + class_name, + modules, + completion_flag: completion_flag.clone(), + }) + .map_err(|e| { + Box::new(std::io::Error::other(format!( + "Failed to send exec_python_function signal: {}", + e + ))) as Box + })?; + } else { + return Err(Box::new(std::io::Error::other( + "Task is not initialized or control channel is not available", + ))); + } + self.wait_with_retry(&completion_flag, "ExecPythonFunction") + } + + pub fn close(&mut self) -> Result<(), Box> { + let completion_flag = TaskCompletionFlag::new(); + if let Some(ref sender) = self.control_sender { + let _ = sender.send(TaskControlSignal::Close { + completion_flag: completion_flag.clone(), + }); + let _ = self.wait_with_retry(&completion_flag, "Close"); + } + + if let Some(handle) = self.task_thread.take() + && let Err(e) = handle.join() + { + log::warn!("Task thread join error: {:?}", e); + } + + self.control_sender.take(); + log::info!("WasmTask closed: {}", self.task_name); + Ok(()) + } + + pub fn get_state(&self) -> ComponentState { + self.state.lock().unwrap().clone() + } + + pub fn is_running(&self) -> bool { + matches!(self.get_state(), ComponentState::Running) + } + + pub fn get_name(&self) -> &str { + &self.task_name + } + + pub fn 
take_thread_groups(&mut self) -> Option> { + self.thread_groups.take() + } + + pub fn wait_for_completion(&self) -> Result> { + if let Some(rx) = self.termination_future.lock().unwrap().take() { + rx.recv() + .map_err(|e| format!("Failed to receive termination state: {}", e).into()) + } else { + Err("Termination future already consumed".into()) + } + } + + pub fn get_execution_state(&self) -> ExecutionState { + ExecutionState::from_u8(self.execution_state.load(Ordering::Relaxed)) + } + + pub fn get_failure_cause(&self) -> Option { + self.failure_cause.lock().unwrap().clone() + } + + pub fn is_thread_alive(&self) -> bool { + self.thread_running.load(Ordering::Relaxed) + } + + pub fn join_thread(&mut self) -> Result<(), Box> { + if let Some(handle) = self.task_thread.take() { + handle + .join() + .map_err(|e| format!("Thread join error: {:?}", e))?; + } + Ok(()) + } + + pub fn try_join_thread(&mut self) -> Result> { + if !self.thread_running.load(Ordering::Relaxed) { + if let Some(handle) = self.task_thread.take() { + handle + .join() + .map_err(|e| format!("Thread join error: {:?}", e))?; + return Ok(true); + } + return Ok(true); + } + Ok(false) + } + + pub fn wait_thread_with_timeout( + &mut self, + timeout: Duration, + ) -> Result> { + let start = std::time::Instant::now(); + + while self.thread_running.load(Ordering::Relaxed) { + if start.elapsed() > timeout { + return Ok(false); + } + thread::sleep(Duration::from_millis(10)); + } + + if let Some(handle) = self.task_thread.take() { + handle + .join() + .map_err(|e| format!("Thread join error: {:?}", e))?; + } + + Ok(true) + } +} + +impl TaskLifecycle for WasmTask { + fn init_with_context( + &mut self, + init_context: &crate::runtime::taskexecutor::InitContext, + ) -> Result<(), Box> { + ::init_with_context(self, init_context) + } + + fn start(&mut self) -> Result<(), Box> { + ::start(self) + } + + fn stop(&mut self) -> Result<(), Box> { + ::stop(self) + } + + fn take_checkpoint( + &mut self, + checkpoint_id: u64, 
+ ) -> Result<(), Box> { + ::take_checkpoint(self, checkpoint_id) + } + + fn close(&mut self) -> Result<(), Box> { + ::close(self) + } + + fn get_state(&self) -> ComponentState { + ::get_state(self) + } + + fn get_name(&self) -> &str { + &self.task_name + } + + fn get_function_info(&self) -> FunctionInfo { + FunctionInfo { + name: self.task_name.clone(), + task_type: self.task_type.clone(), + status: format!("{:?}", self.get_state()), + } + } + + fn exec_python_function( + &self, + class_name: &str, + modules: &[(String, Vec)], + ) -> Result<(), Box> { + ::exec_python_function(self, class_name.to_string(), modules.to_vec()) + } +} + +impl Drop for WasmTask { + fn drop(&mut self) { + if self.task_thread.is_some() { + let _ = self.close(); + } + } +} diff --git a/src/runtime/sink/mod.rs b/src/runtime/sink/mod.rs new file mode 100644 index 00000000..a0a2a6fc --- /dev/null +++ b/src/runtime/sink/mod.rs @@ -0,0 +1,15 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Sink module + +// TODO: Add sink implementation here diff --git a/src/runtime/source/mod.rs b/src/runtime/source/mod.rs new file mode 100644 index 00000000..8a05bf30 --- /dev/null +++ b/src/runtime/source/mod.rs @@ -0,0 +1,15 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Source module + +// TODO: Add source implementation here diff --git a/src/runtime/task/builder/mod.rs b/src/runtime/task/builder/mod.rs new file mode 100644 index 00000000..9dcea1d5 --- /dev/null +++ b/src/runtime/task/builder/mod.rs @@ -0,0 +1,28 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Builder module - Task builder module +// +// Provides different types of task builders: +// - TaskBuilder: Main builder that dispatches to corresponding builders based on configuration type +// - ProcessorBuilder: Processor type task builder +// - SourceBuilder: Source type task builder (future support) +// - SinkBuilder: Sink type task builder (future support) + +mod processor; +#[cfg(feature = "python")] +mod python; +mod sink; +mod source; +mod task_builder; + +pub use task_builder::TaskBuilder; diff --git a/src/runtime/task/builder/processor/mod.rs b/src/runtime/task/builder/processor/mod.rs new file mode 100644 index 00000000..1e4056a4 --- /dev/null +++ b/src/runtime/task/builder/processor/mod.rs @@ -0,0 +1,158 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Processor Builder - Processor type task builder +// +// Specifically handles building logic for Processor type configuration + +use crate::runtime::input::{InputSource, InputSourceProvider}; +use crate::runtime::output::{OutputSink, OutputSinkProvider}; +use crate::runtime::processor::wasm::wasm_processor::WasmProcessorImpl; +use crate::runtime::processor::wasm::wasm_processor_trait::WasmProcessor; +use crate::runtime::processor::wasm::wasm_task::WasmTask; +use crate::runtime::task::yaml_keys::{TYPE, type_values}; +use crate::runtime::task::{InputConfig, OutputConfig, ProcessorConfig, WasmTaskConfig}; +use serde_yaml::Value; +use std::sync::Arc; + +/// ProcessorBuilder - Processor type task builder +pub struct ProcessorBuilder; + +impl ProcessorBuilder { + /// Create Processor type WasmTask from YAML configuration + /// + /// # Arguments + /// - `task_name`: Task name + /// - `yaml_value`: YAML configuration value (root-level configuration) + /// - `wasm_path`: wasm module path + /// + /// # Returns + /// - `Ok(Arc)`: Successfully created WasmTask + /// - `Err(...)`: Creation failed + pub fn build( + task_name: String, + yaml_value: &Value, + module_bytes: Vec, + ) -> Result, Box> { + let config_type = yaml_value + .get(TYPE) + .and_then(|v| v.as_str()) + .ok_or_else(|| { + Box::new(std::io::Error::new( + std::io::ErrorKind::InvalidData, + format!("Missing '{}' field in YAML config", TYPE), + )) as Box + })?; + + if config_type != type_values::PROCESSOR { + return Err(Box::new(std::io::Error::new( + std::io::ErrorKind::InvalidData, + format!( + "Invalid config type '{}', expected '{}'", + config_type, + type_values::PROCESSOR + ), + )) as Box); + } + + let task_config = WasmTaskConfig::from_yaml_value(task_name.clone(), yaml_value)?; + + let total_inputs: usize = task_config + .input_groups + .iter() + .map(|group| group.inputs.len()) + .sum(); + + log::info!( + "Parsed processor config: {} input groups ({} total inputs), {} outputs, processor: {}", + 
task_config.input_groups.len(), + total_inputs, + task_config.outputs.len(), + task_config.processor.name + ); + + let mut all_inputs = Vec::new(); + for (group_idx, input_group) in task_config.input_groups.iter().enumerate() { + let group_inputs = Self::create_inputs_from_config(&input_group.inputs, group_idx) + .map_err(|e| -> Box { + Box::new(std::io::Error::new( + std::io::ErrorKind::InvalidData, + format!( + "Failed to create input sources for input group #{}: {}", + group_idx + 1, + e + ), + )) + })?; + log::debug!( + "Created {} input source(s) for input group #{}", + group_inputs.len(), + group_idx + 1 + ); + all_inputs.extend(group_inputs); + } + log::debug!( + "Created {} total input source(s) from {} input group(s)", + all_inputs.len(), + task_config.input_groups.len() + ); + + let outputs = Self::create_outputs_from_config(&task_config.outputs)?; + log::debug!("Created {} output(s)", outputs.len()); + + let processor = Self::create_processor_from_config(&task_config.processor, module_bytes)?; + log::debug!("Created wasm processor: {}", task_config.processor.name); + + let task = WasmTask::new( + task_config.task_name.clone(), + type_values::PROCESSOR.to_string(), + all_inputs, + processor, + outputs, + ); + let task = Arc::new(task); + + log::debug!( + "WasmTask created successfully for processor task: {}", + task_config.task_name + ); + + Ok(task) + } + + fn create_inputs_from_config( + inputs: &[InputConfig], + group_idx: usize, + ) -> Result>, Box> { + InputSourceProvider::from_input_configs(inputs, group_idx) + } + + fn create_processor_from_config( + processor_config: &ProcessorConfig, + module_bytes: Vec, + ) -> Result, Box> { + let processor_impl = WasmProcessorImpl::new( + processor_config.name.clone(), + module_bytes, + processor_config.init_config.clone(), + ); + + Ok(Box::new(processor_impl)) + } + + /// Create OutputSink instances from OutputConfig list + fn create_outputs_from_config( + outputs: &[OutputConfig], + ) -> Result>, Box> { + 
OutputSinkProvider::from_output_configs(outputs) + } +} diff --git a/src/runtime/task/builder/python/mod.rs b/src/runtime/task/builder/python/mod.rs new file mode 100644 index 00000000..8830e25a --- /dev/null +++ b/src/runtime/task/builder/python/mod.rs @@ -0,0 +1,165 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// python Builder - python runtime task builder +// +// Specifically handles building logic for python runtime configuration + +use crate::runtime::input::{InputSource, InputSourceProvider}; +use crate::runtime::output::{OutputSink, OutputSinkProvider}; +use crate::runtime::processor::python::get_python_engine_and_component; +use crate::runtime::processor::wasm::wasm_processor::WasmProcessorImpl; +use crate::runtime::processor::wasm::wasm_processor_trait::WasmProcessor; +use crate::runtime::processor::wasm::wasm_task::WasmTask; +use crate::runtime::task::yaml_keys::{TYPE, type_values}; +use crate::runtime::task::{InputConfig, OutputConfig, ProcessorConfig, WasmTaskConfig}; +use serde_yaml::Value; +use std::sync::Arc; + +pub struct PythonBuilder; + +impl PythonBuilder { + pub fn build( + task_name: String, + yaml_value: &Value, + modules: &[(String, Vec)], + ) -> Result, Box> + { + let config_type = yaml_value + .get(TYPE) + .and_then(|v| v.as_str()) + .ok_or_else(|| { + Box::new(std::io::Error::new( + std::io::ErrorKind::InvalidData, + format!("Missing '{}' field in YAML config", TYPE), + )) as Box + })?; + + if config_type != 
type_values::PYTHON { + return Err(Box::new(std::io::Error::new( + std::io::ErrorKind::InvalidData, + format!( + "Invalid config type '{}', expected '{}'", + config_type, + type_values::PYTHON + ), + )) as Box); + } + + let task_config = WasmTaskConfig::from_yaml_value(task_name.clone(), yaml_value)?; + + let total_inputs: usize = task_config + .input_groups + .iter() + .map(|group| group.inputs.len()) + .sum(); + + log::debug!( + "Parsed python config: {} input groups ({} total inputs), {} outputs, processor: {}", + task_config.input_groups.len(), + total_inputs, + task_config.outputs.len(), + task_config.processor.name + ); + + let mut all_inputs = Vec::new(); + for (group_idx, input_group) in task_config.input_groups.iter().enumerate() { + let group_inputs = Self::create_inputs_from_config(&input_group.inputs, group_idx) + .map_err(|e| -> Box { + Box::new(std::io::Error::new( + std::io::ErrorKind::InvalidData, + format!( + "Failed to create input sources for input group #{}: {}", + group_idx + 1, + e + ), + )) + })?; + log::debug!( + "Created {} input source(s) for input group #{}", + group_inputs.len(), + group_idx + 1 + ); + all_inputs.extend(group_inputs); + } + log::debug!( + "Created {} total input source(s) from {} input group(s)", + all_inputs.len(), + task_config.input_groups.len() + ); + + let outputs = Self::create_outputs_from_config(&task_config.outputs)?; + log::debug!("Created {} output(s)", outputs.len()); + + let processor = Self::create_processor_from_config(&task_config.processor, modules)?; + log::debug!("Created python processor: {}", task_config.processor.name); + + let task = WasmTask::new( + task_config.task_name.clone(), + type_values::PYTHON.to_string(), + all_inputs, + processor, + outputs, + ); + let task = Arc::new(task); + + log::debug!( + "WasmTask created successfully for python task: {}", + task_config.task_name + ); + + Ok(Box::new(Arc::try_unwrap(task).map_err( + |_| -> Box { + Box::new(std::io::Error::other("Failed to unwrap 
Arc")) + }, + )?)) + } + + fn create_inputs_from_config( + inputs: &[InputConfig], + group_idx: usize, + ) -> Result>, Box> { + InputSourceProvider::from_input_configs(inputs, group_idx) + } + + fn create_processor_from_config( + processor_config: &ProcessorConfig, + modules: &[(String, Vec)], + ) -> Result, Box> { + // Get python wasm engine and component for reuse + let (custom_engine, custom_component) = get_python_engine_and_component().map_err( + |e| -> Box { + Box::new(std::io::Error::new( + std::io::ErrorKind::Other, + format!("Failed to get python wasm engine and component: {}", e), + )) + }, + )?; + + // Clone the Component (Component implements Clone via Arc) + let processor_impl = WasmProcessorImpl::new_with_custom_engine_and_component( + processor_config.name.clone(), + modules, + processor_config.init_config.clone(), + custom_engine, + (*custom_component).clone(), + ); + + Ok(Box::new(processor_impl)) + } + + fn create_outputs_from_config( + outputs: &[OutputConfig], + ) -> Result>, Box> { + OutputSinkProvider::from_output_configs(outputs) + } +} diff --git a/src/runtime/task/builder/sink/mod.rs b/src/runtime/task/builder/sink/mod.rs new file mode 100644 index 00000000..8e47d8eb --- /dev/null +++ b/src/runtime/task/builder/sink/mod.rs @@ -0,0 +1,69 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Sink Builder - Sink type task builder +// +// Specifically handles building logic for Sink type configuration (future support) + +use crate::runtime::processor::wasm::wasm_task::WasmTask; +use crate::runtime::task::yaml_keys::{TYPE, type_values}; +use serde_yaml::Value; +use std::sync::Arc; + +/// SinkBuilder - Sink type task builder +pub struct SinkBuilder; + +impl SinkBuilder { + /// Create Sink type task from YAML configuration + /// + /// # Arguments + /// - `task_name`: Task name + /// - `yaml_value`: YAML configuration value (root-level configuration) + /// - `wasm_path`: wasm module path (optional) + /// + /// # Returns + /// - `Ok(Arc)`: Successfully created task (future support) + /// - `Err(...)`: Currently not implemented, returns error + pub fn build( + _task_name: String, + yaml_value: &Value, + _module_bytes: Vec, + ) -> Result, Box> { + // Validate configuration type + let config_type = yaml_value + .get(TYPE) + .and_then(|v| v.as_str()) + .ok_or_else(|| { + Box::new(std::io::Error::new( + std::io::ErrorKind::InvalidData, + format!("Missing '{}' field in YAML config", TYPE), + )) as Box + })?; + + if config_type != type_values::SINK { + return Err(Box::new(std::io::Error::new( + std::io::ErrorKind::InvalidData, + format!( + "Invalid config type '{}', expected '{}'", + config_type, + type_values::SINK + ), + )) as Box); + } + + // TODO: Implement Sink type task building logic + Err(Box::new(std::io::Error::new( + std::io::ErrorKind::Unsupported, + "Sink type task builder is not yet implemented", + )) as Box) + } +} diff --git a/src/runtime/task/builder/source/mod.rs b/src/runtime/task/builder/source/mod.rs new file mode 100644 index 00000000..3a37c6d4 --- /dev/null +++ b/src/runtime/task/builder/source/mod.rs @@ -0,0 +1,69 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Source Builder - Source type task builder +// +// Specifically handles building logic for Source type configuration (future support) + +use crate::runtime::processor::wasm::wasm_task::WasmTask; +use crate::runtime::task::yaml_keys::{TYPE, type_values}; +use serde_yaml::Value; +use std::sync::Arc; + +/// SourceBuilder - Source type task builder +pub struct SourceBuilder; + +impl SourceBuilder { + /// Create Source type task from YAML configuration + /// + /// # Arguments + /// - `task_name`: Task name + /// - `yaml_value`: YAML configuration value (root-level configuration) + /// - `wasm_path`: wasm module path (optional) + /// + /// # Returns + /// - `Ok(Arc)`: Successfully created task (future support) + /// - `Err(...)`: Currently not implemented, returns error + pub fn build( + _task_name: String, + yaml_value: &Value, + _module_bytes: Vec, + ) -> Result, Box> { + // Validate configuration type + let config_type = yaml_value + .get(TYPE) + .and_then(|v| v.as_str()) + .ok_or_else(|| { + Box::new(std::io::Error::new( + std::io::ErrorKind::InvalidData, + format!("Missing '{}' field in YAML config", TYPE), + )) as Box + })?; + + if config_type != type_values::SOURCE { + return Err(Box::new(std::io::Error::new( + std::io::ErrorKind::InvalidData, + format!( + "Invalid config type '{}', expected '{}'", + config_type, + type_values::SOURCE + ), + )) as Box); + } + + // TODO: Implement Source type task building logic + Err(Box::new(std::io::Error::new( + std::io::ErrorKind::Unsupported, + "Source type task builder is not yet implemented", + )) as 
Box) + } +} diff --git a/src/runtime/task/builder/task_builder.rs b/src/runtime/task/builder/task_builder.rs new file mode 100644 index 00000000..ede1cd2d --- /dev/null +++ b/src/runtime/task/builder/task_builder.rs @@ -0,0 +1,180 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Task Builder - Factory for creating tasks from YAML configuration +//! +//! Provides unified factory methods to create TaskLifecycle instances from YAML config. +//! Dispatches to specific builders (Processor, Source, Sink, Python) based on task type. 
+ +use crate::runtime::task::TaskLifecycle; +use crate::runtime::task::builder::processor::ProcessorBuilder; +#[cfg(feature = "python")] +use crate::runtime::task::builder::python::PythonBuilder; +use crate::runtime::task::builder::sink::SinkBuilder; +use crate::runtime::task::builder::source::SourceBuilder; +use crate::runtime::task::yaml_keys::{NAME, TYPE, type_values}; +use serde_yaml::Value; +use std::sync::Arc; + +/// Type alias for builder results +pub type BuildResult = Result, Box>; + +/// Task builder error +fn build_error(msg: impl Into) -> Box { + Box::new(std::io::Error::new( + std::io::ErrorKind::InvalidData, + msg.into(), + )) +} + +/// Factory for creating tasks from configuration +pub struct TaskBuilder; + +impl TaskBuilder { + /// Create a task from YAML configuration and module bytes + /// + /// # Arguments + /// * `config_bytes` - YAML configuration as bytes + /// * `module_bytes` - WASM/Python module bytes + /// + /// # Returns + /// A boxed TaskLifecycle implementation based on the task type in config + pub fn from_yaml_config(config_bytes: &[u8], module_bytes: &[u8]) -> BuildResult { + let yaml_value = Self::parse_yaml(config_bytes)?; + let task_name = Self::extract_task_name(&yaml_value)?; + let task_type = Self::extract_task_type(&yaml_value, &task_name)?; + + Self::build_task(&task_type, task_name, &yaml_value, module_bytes.to_vec()) + } + + /// Create a Python task from YAML configuration (for fs-exec) + /// + /// This method is specifically for Python functions executed via fs-exec. + /// It forces the task type to Python regardless of the config's type field. 
+ /// + /// # Arguments + /// * `config_bytes` - YAML configuration as bytes + /// * `modules` - Python modules as (name, bytes) pairs + #[cfg(feature = "python")] + pub fn from_python_config(config_bytes: &[u8], modules: &[(String, Vec)]) -> BuildResult { + let yaml_value = Self::parse_yaml(config_bytes)?; + let task_name = Self::extract_task_name(&yaml_value)?; + + log::debug!("Creating Python task '{}' via fs-exec", task_name); + PythonBuilder::build(task_name, &yaml_value, modules) + } + + /// Parse YAML configuration + fn parse_yaml(config_bytes: &[u8]) -> Result> { + serde_yaml::from_slice(config_bytes).map_err(|e| { + let preview: String = String::from_utf8_lossy(config_bytes) + .chars() + .take(500) + .collect(); + log::error!("Failed to parse YAML config: {}. Preview:\n{}", e, preview); + build_error(format!("Failed to parse YAML: {}", e)) + }) + } + + /// Extract task name from YAML + fn extract_task_name(yaml: &Value) -> Result> { + yaml.get(NAME) + .and_then(|v| v.as_str()) + .map(String::from) + .ok_or_else(|| { + let keys = Self::get_yaml_keys(yaml); + log::error!("Missing '{}' field. 
Available keys: {:?}", NAME, keys); + build_error(format!("Missing '{}' field in config", NAME)) + }) + } + + /// Extract task type from YAML + fn extract_task_type( + yaml: &Value, + task_name: &str, + ) -> Result> { + yaml.get(TYPE) + .and_then(|v| v.as_str()) + .map(String::from) + .ok_or_else(|| { + log::error!("Missing '{}' field for task '{}'", TYPE, task_name); + build_error(format!("Missing '{}' field for task '{}'", TYPE, task_name)) + }) + } + + /// Get available keys from YAML for error messages + fn get_yaml_keys(yaml: &Value) -> Vec { + yaml.as_mapping() + .map(|m| { + m.keys() + .filter_map(|k| k.as_str().map(String::from)) + .collect() + }) + .unwrap_or_default() + } + + /// Build task based on type + fn build_task( + task_type: &str, + task_name: String, + yaml: &Value, + module_bytes: Vec, + ) -> BuildResult { + match task_type { + type_values::PROCESSOR => Self::build_wasm_task( + ProcessorBuilder::build(task_name.clone(), yaml, module_bytes), + &task_name, + ), + type_values::SOURCE => Self::build_wasm_task( + SourceBuilder::build(task_name.clone(), yaml, module_bytes), + &task_name, + ), + type_values::SINK => Self::build_wasm_task( + SinkBuilder::build(task_name.clone(), yaml, module_bytes), + &task_name, + ), + _ => { + log::error!("Unsupported task type: {}", task_type); + Err(build_error(format!("Unsupported task type: {}", task_type))) + } + } + } + + /// Build and unwrap WASM task from Arc + fn build_wasm_task( + result: Result< + Arc, + Box, + >, + task_name: &str, + ) -> BuildResult { + let arc = result.map_err(|e| { + log::error!("Failed to build task '{}': {}", task_name, e); + e + })?; + + Arc::try_unwrap(arc) + .map(|task| Box::new(task) as Box) + .map_err(|arc| { + let refs = Arc::strong_count(&arc); + log::error!( + "Failed to unwrap Arc for '{}': {} references", + task_name, + refs + ); + build_error(format!( + "Task '{}' has {} active references", + task_name, refs + )) + }) + } +} diff --git a/src/runtime/task/lifecycle.rs 
b/src/runtime/task/lifecycle.rs new file mode 100644 index 00000000..d7d32522 --- /dev/null +++ b/src/runtime/task/lifecycle.rs @@ -0,0 +1,178 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Task Lifecycle - Task lifecycle management interface +// +// Defines the complete lifecycle management interface for Task, including initialization, start, stop, checkpoint and close + +use crate::runtime::common::ComponentState; +use crate::runtime::taskexecutor::InitContext; +use crate::storage::task::FunctionInfo; + +/// Task lifecycle management interface +/// +/// Defines complete lifecycle management methods for Task, following standard state transition flow: +/// ```ignore +/// Uninitialized -> Initialized -> Starting -> Running +/// | +/// v +/// Checkpointing +/// | +/// v +/// Stopping -> Stopped +/// | +/// v +/// Closing -> Closed +/// ``` +/// +/// All methods should be called in appropriate thread context and follow state machine transition rules. 
+pub trait TaskLifecycle: Send + Sync { + /// Initialize task with initialization context + /// + /// Called before task is used to perform necessary initialization work, including: + /// - Load configuration + /// - Initialize resources + /// - Prepare runtime environment + /// + /// State transition: Uninitialized -> Initialized + /// + /// # Arguments + /// - `init_context`: Initialization context containing state storage, task storage and other resources + /// + /// # Returns + /// - `Ok(())`: Initialization successful + /// - `Err(...)`: Initialization failed + fn init_with_context( + &mut self, + init_context: &InitContext, + ) -> Result<(), Box>; + + /// Start task + /// + /// Start task execution, begin processing data stream. + /// Before calling this method, the task should have completed initialization. + /// + /// State transition: Initialized/Stopped -> Starting -> Running + /// + /// # Returns + /// - `Ok(())`: Start successful + /// - `Err(...)`: Start failed + fn start(&mut self) -> Result<(), Box>; + + /// Stop task + /// + /// Stop task execution, but keep resources available, can be restarted. + /// After stopping, the task no longer processes new data, but processed data state is preserved. + /// + /// State transition: Running/Checkpointing -> Stopping -> Stopped + /// + /// # Returns + /// - `Ok(())`: Stop successful + /// - `Err(...)`: Stop failed + fn stop(&mut self) -> Result<(), Box>; + + /// Execute checkpoint + /// + /// Save current task state for failure recovery. + /// Checkpoint operation should be atomic, ensuring state consistency. 
+ /// + /// State transition: Running -> Checkpointing -> Running + /// + /// # Arguments + /// - `checkpoint_id`: Checkpoint ID for identification and recovery + /// + /// # Returns + /// - `Ok(())`: Checkpoint successful + /// - `Err(...)`: Checkpoint failed + fn take_checkpoint( + &mut self, + checkpoint_id: u64, + ) -> Result<(), Box>; + + /// Close task + /// + /// Release all task resources, the task will no longer be usable. + /// Before closing, task execution should be stopped first. + /// + /// State transition: Running/Stopped -> Closing -> Closed + /// + /// # Returns + /// - `Ok(())`: Close successful + /// - `Err(...)`: Close failed + fn close(&mut self) -> Result<(), Box>; + + /// Get current state + /// + /// Returns the current lifecycle state of the task. + /// + /// # Returns + /// Current state of the task + fn get_state(&self) -> ComponentState; + + /// Get task name + /// + /// Returns the name of the task. + /// + /// # Returns + /// Name of the task + fn get_name(&self) -> &str; + + fn get_function_info(&self) -> FunctionInfo; + + /// Check if task is running + /// + /// # Returns + /// - `true`: Task is running (Running or Checkpointing state) + /// - `false`: Task is not running + fn is_running(&self) -> bool { + self.get_state().is_running() + } + + /// Check if task is closed + /// + /// # Returns + /// - `true`: Task is closed + /// - `false`: Task is not closed + fn is_closed(&self) -> bool { + self.get_state().is_closed() + } + + /// Check if task is in error state + /// + /// # Returns + /// - `true`: Task is in error state + /// - `false`: Task is not in error state + fn is_error(&self) -> bool { + self.get_state().is_error() + } + + /// Execute Python function dynamically + /// + /// This method calls fs_exec to load and execute Python code dynamically. + /// Default implementation returns an error indicating this is not supported. 
+ /// + /// # Arguments + /// * `class_name` - Name of the Python class to load + /// * `modules` - List of modules (name, bytes) to load + /// + /// # Returns + /// Ok(()) if execution succeeds, or an error if it fails + fn exec_python_function( + &self, + _class_name: &str, + _modules: &[(String, Vec)], + ) -> Result<(), Box> { + Err(Box::new(std::io::Error::other( + "exec_python_function not supported by this task type", + ))) + } +} diff --git a/src/runtime/task/mod.rs b/src/runtime/task/mod.rs new file mode 100644 index 00000000..3660aff7 --- /dev/null +++ b/src/runtime/task/mod.rs @@ -0,0 +1,22 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Task module - Task lifecycle management + +mod builder; +mod lifecycle; +mod processor_config; +mod yaml_keys; + +pub use builder::TaskBuilder; +pub use lifecycle::*; +pub use processor_config::{InputConfig, OutputConfig, ProcessorConfig, WasmTaskConfig}; diff --git a/src/runtime/task/processor_config.rs b/src/runtime/task/processor_config.rs new file mode 100644 index 00000000..64cd5752 --- /dev/null +++ b/src/runtime/task/processor_config.rs @@ -0,0 +1,606 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Task Configuration - Configuration structs for task components +//! +//! Defines configuration structures for Input, Processor, and Output components. + +use serde::{Deserialize, Serialize}; +use serde_yaml::Value; +use std::collections::HashMap; + +// ============================================================================ +// Input Configuration +// ============================================================================ + +/// InputConfig - Input source configuration +/// +/// Supports multiple input source types, each with its corresponding configuration structure. 
+#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(tag = "input-type", rename_all = "kebab-case")] +pub enum InputConfig { + /// Kafka input source configuration + Kafka { + /// Kafka server address + bootstrap_servers: String, + /// Topic name + topic: String, + /// Partition number (optional, uses subscribe auto-assignment if not specified) + #[serde(default)] + partition: Option, + /// Consumer group ID + group_id: String, + /// Additional Kafka configuration options (optional) + #[serde(flatten)] + extra: HashMap, + }, +} + +impl InputConfig { + /// Parse InputConfig from YAML Value + /// + /// # Arguments + /// - `value`: YAML Value object + /// + /// # Returns + /// - `Ok(InputConfig)`: Successfully parsed + /// - `Err(...)`: Parsing failed + pub fn from_yaml_value(value: &Value) -> Result> { + // Try to deserialize using serde + let config: InputConfig = serde_yaml::from_value(value.clone()).map_err( + |e| -> Box { + Box::new(std::io::Error::new( + std::io::ErrorKind::InvalidData, + format!("Failed to parse input config: {}", e), + )) + }, + )?; + Ok(config) + } + + /// Get input source type name + pub fn input_type(&self) -> &'static str { + match self { + InputConfig::Kafka { .. } => "kafka", + } + } +} + +// ============================================================================ +// Input Group Configuration +// ============================================================================ + +/// InputGroup - Input group configuration +/// +/// An input group contains multiple input source configurations. +/// A wasm task can contain multiple input groups, each group can contain multiple input sources. +#[derive(Debug, Clone)] +pub struct InputGroup { + /// Input source configuration list + /// + /// An input group can contain multiple input sources that are processed together. 
+ pub inputs: Vec, +} + +impl InputGroup { + /// Create a new input group + /// + /// # Arguments + /// - `inputs`: Input source configuration list + /// + /// # Returns + /// - `InputGroup`: The created input group + pub fn new(inputs: Vec) -> Self { + Self { inputs } + } + + /// Parse InputGroup from YAML Value + /// + /// # Arguments + /// - `value`: YAML Value object (should be an object containing an inputs array) + /// + /// # Returns + /// - `Ok(InputGroup)`: Successfully parsed + /// - `Err(...)`: Parsing failed + pub fn from_yaml_value(value: &Value) -> Result> { + // If value itself is an array, parse directly as input source list + if let Some(inputs_seq) = value.as_sequence() { + let mut inputs = Vec::new(); + for (idx, input_value) in inputs_seq.iter().enumerate() { + let input_type = input_value + .get("input-type") + .and_then(|v| v.as_str()) + .unwrap_or("unknown"); + + let input = InputConfig::from_yaml_value(input_value).map_err( + |e| -> Box { + Box::new(std::io::Error::new( + std::io::ErrorKind::InvalidData, + format!( + "Failed to parse input #{} (type: {}) in input group: {}", + idx + 1, + input_type, + e + ), + )) + }, + )?; + + let parsed_type = input.input_type(); + if parsed_type != input_type && input_type != "unknown" { + return Err(Box::new(std::io::Error::new( + std::io::ErrorKind::InvalidData, + format!( + "Input #{} type mismatch in input group: expected '{}', but got '{}'", + idx + 1, + input_type, + parsed_type + ), + )) as Box); + } + + inputs.push(input); + } + return Ok(InputGroup::new(inputs)); + } + + // If value is an object, try to get the inputs field + if let Some(inputs_value) = value.get("inputs") + && let Some(inputs_seq) = inputs_value.as_sequence() + { + let mut inputs = Vec::new(); + for (idx, input_value) in inputs_seq.iter().enumerate() { + let input_type = input_value + .get("input-type") + .and_then(|v| v.as_str()) + .unwrap_or("unknown"); + + let input = InputConfig::from_yaml_value(input_value).map_err( + |e| 
-> Box { + Box::new(std::io::Error::new( + std::io::ErrorKind::InvalidData, + format!( + "Failed to parse input #{} (type: {}) in input group: {}", + idx + 1, + input_type, + e + ), + )) + }, + )?; + + let parsed_type = input.input_type(); + if parsed_type != input_type && input_type != "unknown" { + return Err(Box::new(std::io::Error::new( + std::io::ErrorKind::InvalidData, + format!( + "Input #{} type mismatch in input group: expected '{}', but got '{}'", + idx + 1, + input_type, + parsed_type + ), + )) as Box); + } + + inputs.push(input); + } + return Ok(InputGroup::new(inputs)); + } + + Err(Box::new(std::io::Error::new( + std::io::ErrorKind::InvalidData, + "Invalid input group format: expected an array of inputs or an object with 'inputs' field", + )) as Box) + } +} + +// ============================================================================ +// Processor Configuration +// ============================================================================ + +/// ProcessorConfig - Processor configuration +/// +/// Contains basic processor information. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ProcessorConfig { + /// Processor name + pub name: String, + + /// Whether to use built-in Event serialization + /// + /// If true, uses the system's built-in Event serialization method. + /// If false or not set, uses default serialization method. + #[serde(default)] + pub use_builtin_event_serialization: bool, + /// Whether to enable CheckPoint + /// + /// If true, enables checkpoint functionality. + /// If false or not set, checkpoints are disabled. + #[serde(default)] + pub enable_checkpoint: bool, + /// CheckPoint time interval (seconds) + /// + /// Time interval for checkpoints, minimum value is 1 second. + /// If not set or less than 1, uses default value of 1 second. 
+ #[serde(default = "default_checkpoint_interval")] + pub checkpoint_interval_seconds: u64, + /// WASM initialization configuration (optional) + /// + /// Configuration parameters passed to wasm module's fs_init function. + /// If not configured, an empty Map is passed. + #[serde(default)] + pub init_config: HashMap, +} + +/// Default checkpoint interval (1 second) +fn default_checkpoint_interval() -> u64 { + 1 +} + +impl ProcessorConfig { + /// Parse ProcessorConfig from YAML Value + /// + /// # Arguments + /// - `value`: YAML Value object (root level config containing name, input-groups, outputs, etc.) + /// + /// # Returns + /// - `Ok(ProcessorConfig)`: Successfully parsed + /// - `Err(...)`: Parsing failed + /// + /// Note: Extracts processor-related fields from root level config, ignoring `type`, `input-groups`, `outputs`, etc. + pub fn from_yaml_value(value: &Value) -> Result> { + // Create a new Value containing only processor-related fields, excluding type, input-groups, outputs, etc. 
+ let mut processor_value = serde_yaml::Mapping::new(); + + // Copy name field (if exists) + if let Some(name_val) = value.get("name") { + processor_value.insert( + serde_yaml::Value::String("name".to_string()), + name_val.clone(), + ); + } + + // Copy other processor-related fields (if exist) + if let Some(use_builtin) = value.get("use_builtin_event_serialization") { + processor_value.insert( + serde_yaml::Value::String("use_builtin_event_serialization".to_string()), + use_builtin.clone(), + ); + } + if let Some(enable_checkpoint) = value.get("enable_checkpoint") { + processor_value.insert( + serde_yaml::Value::String("enable_checkpoint".to_string()), + enable_checkpoint.clone(), + ); + } + if let Some(checkpoint_interval) = value.get("checkpoint_interval_seconds") { + processor_value.insert( + serde_yaml::Value::String("checkpoint_interval_seconds".to_string()), + checkpoint_interval.clone(), + ); + } + + if let Some(init_config_val) = value.get("init_config") { + processor_value.insert( + serde_yaml::Value::String("init_config".to_string()), + init_config_val.clone(), + ); + } + + // Parse ProcessorConfig from cleaned Value + let clean_value = serde_yaml::Value::Mapping(processor_value); + let mut config: ProcessorConfig = serde_yaml::from_value(clean_value).map_err( + |e| -> Box { + let available_keys: Vec = value + .as_mapping() + .map(|m| { + m.keys() + .filter_map(|k| k.as_str().map(|s| s.to_string())) + .collect() + }) + .unwrap_or_default(); + Box::new(std::io::Error::new( + std::io::ErrorKind::InvalidData, + format!( + "Failed to parse processor config: {}. 
Available keys in root: {:?}", + e, available_keys + ), + )) + }, + )?; + + // If name is not provided, use default value + if config.name.is_empty() { + config.name = "default-processor".to_string(); + } + + // Validate and fix checkpoint_interval_seconds (minimum value is 1 second) + if config.checkpoint_interval_seconds < 1 { + config.checkpoint_interval_seconds = 1; + } + + Ok(config) + } +} + +// ============================================================================ +// Output Configuration +// ============================================================================ + +/// OutputConfig - Output configuration +/// +/// Supports multiple output types, each with its corresponding configuration structure. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(tag = "output-type", rename_all = "kebab-case")] +pub enum OutputConfig { + /// Kafka output sink configuration + Kafka { + /// Kafka server address + bootstrap_servers: String, + /// Topic name + topic: String, + /// Partition number + partition: u32, + /// Additional Kafka configuration options (optional) + #[serde(flatten)] + extra: HashMap, + }, +} + +impl OutputConfig { + /// Parse OutputConfig from YAML Value + /// + /// # Arguments + /// - `value`: YAML Value object + /// + /// # Returns + /// - `Ok(OutputConfig)`: Successfully parsed + /// - `Err(...)`: Parsing failed + pub fn from_yaml_value(value: &Value) -> Result> { + let config: OutputConfig = serde_yaml::from_value(value.clone()).map_err( + |e| -> Box { + Box::new(std::io::Error::new( + std::io::ErrorKind::InvalidData, + format!("Failed to parse output config: {}", e), + )) + }, + )?; + Ok(config) + } + + /// Get output type name + pub fn output_type(&self) -> &'static str { + match self { + OutputConfig::Kafka { .. 
} => "kafka", + } + } +} + +// ============================================================================ +// WasmTask Configuration +// ============================================================================ + +/// WasmTaskConfig - WASM task configuration +/// +/// Contains configuration for Input, Processor, and Output components. +#[derive(Debug, Clone)] +pub struct WasmTaskConfig { + /// Task name + pub task_name: String, + /// Input group configuration list + /// + /// This is an array that can contain multiple input groups. + /// Each input group contains multiple input source configurations, + /// supporting reading from multiple data sources simultaneously. + /// For example: can have multiple input groups, each containing inputs from multiple Kafka topics. + pub input_groups: Vec, + /// Processor configuration + pub processor: ProcessorConfig, + /// Output configuration list + /// + /// This is an array that can contain multiple output configurations. + /// Each output configuration represents a set of outputs, + /// supporting writing to multiple data sources simultaneously. + /// For example: can write to multiple Kafka topics simultaneously. + pub outputs: Vec, +} + +impl WasmTaskConfig { + /// Parse WasmTaskConfig from YAML Value + /// + /// # Arguments + /// - `task_name`: Task name (used if config doesn't have a name field) + /// - `value`: YAML Value object (root level config containing name, type, input-groups, outputs, etc.) + /// + /// # Returns + /// - `Ok(WasmTaskConfig)`: Successfully parsed + /// - `Err(...)`: Parsing failed + /// + /// Configuration format: + /// ```yaml + /// name: "my-task" + /// type: processor + /// input-groups: [...] + /// outputs: [...] + /// ``` + pub fn from_yaml_value( + task_name: String, + value: &Value, + ) -> Result> { + use crate::runtime::task::yaml_keys::{INPUT_GROUPS, INPUTS, NAME, OUTPUTS}; + + // 1. 
Get name from config (if exists), otherwise use the passed task_name + let config_name = value + .get(NAME) + .and_then(|v| v.as_str()) + .map(|s| s.to_string()) + .unwrap_or(task_name); + + // 2. Parse Processor config (extract processor-related fields from root level config) + // Note: Config is at root level, ProcessorConfig needs to parse from root level + let mut processor = ProcessorConfig::from_yaml_value(value)?; + + // If ProcessorConfig name is empty, use config name + if processor.name.is_empty() { + processor.name = config_name.clone(); + } + + // 3. Parse Input Groups config + // Supports two formats: + // 1. Direct inputs array (backward compatible, parsed as single input group) + // 2. input-groups array, each element is an input group + let input_groups_value = value + .get(INPUT_GROUPS) + .or_else(|| value.get(INPUTS)) // Backward compatible + .ok_or_else(|| { + Box::new(std::io::Error::new( + std::io::ErrorKind::InvalidData, + format!( + "Missing '{}' or '{}' in processor config", + INPUT_GROUPS, INPUTS + ), + )) as Box + })?; + + let input_groups_seq = input_groups_value.as_sequence().ok_or_else(|| { + Box::new(std::io::Error::new( + std::io::ErrorKind::InvalidData, + "Invalid 'input-groups' or 'inputs' format, expected a list", + )) as Box + })?; + + if input_groups_seq.is_empty() { + return Err(Box::new(std::io::Error::new( + std::io::ErrorKind::InvalidData, + "Empty 'input-groups' or 'inputs' list in config", + )) as Box); + } + + const MAX_INPUT_GROUPS: usize = 64; + if input_groups_seq.len() > MAX_INPUT_GROUPS { + return Err(Box::new(std::io::Error::new( + std::io::ErrorKind::InvalidData, + format!( + "Too many input groups: {} (maximum is {})", + input_groups_seq.len(), + MAX_INPUT_GROUPS + ), + )) as Box); + } + + // Parse each input group config + // input-groups is an array, each element represents an input group (containing multiple input sources) + let mut input_groups = Vec::new(); + for (group_idx, group_value) in 
input_groups_seq.iter().enumerate() { + let input_group = InputGroup::from_yaml_value(group_value).map_err( + |e| -> Box { + Box::new(std::io::Error::new( + std::io::ErrorKind::InvalidData, + format!("Failed to parse input group #{}: {}", group_idx + 1, e), + )) + }, + )?; + + // Validate input group is not empty + if input_group.inputs.is_empty() { + return Err(Box::new(std::io::Error::new( + std::io::ErrorKind::InvalidData, + format!("Input group #{} is empty", group_idx + 1), + )) as Box); + } + + input_groups.push(input_group); + } + + // 4. Parse Outputs config + let outputs_value = value.get(OUTPUTS).ok_or_else(|| { + Box::new(std::io::Error::new( + std::io::ErrorKind::InvalidData, + "Missing 'outputs' in processor config", + )) as Box + })?; + + let outputs_seq = outputs_value.as_sequence().ok_or_else(|| { + Box::new(std::io::Error::new( + std::io::ErrorKind::InvalidData, + "Invalid 'outputs' format, expected a list", + )) as Box + })?; + + if outputs_seq.is_empty() { + return Err(Box::new(std::io::Error::new( + std::io::ErrorKind::InvalidData, + "Empty 'outputs' list in config", + )) as Box); + } + + const MAX_OUTPUTS: usize = 64; + if outputs_seq.len() > MAX_OUTPUTS { + return Err(Box::new(std::io::Error::new( + std::io::ErrorKind::InvalidData, + format!( + "Too many outputs: {} (maximum is {})", + outputs_seq.len(), + MAX_OUTPUTS + ), + )) as Box); + } + + // Parse each output config + // outputs is an array, each element represents an output configuration + let mut outputs = Vec::new(); + for (idx, output_value) in outputs_seq.iter().enumerate() { + // Try to get output type for clearer error message + let output_type = output_value + .get("output-type") + .and_then(|v| v.as_str()) + .unwrap_or("unknown"); + + let output = OutputConfig::from_yaml_value(output_value).map_err( + |e| -> Box { + Box::new(std::io::Error::new( + std::io::ErrorKind::InvalidData, + format!( + "Failed to parse output #{} (type: {}): {}", + idx + 1, + output_type, + e + ), + )) + 
}, + )?; + + // Validate output type matches + let parsed_type = output.output_type(); + if parsed_type != output_type && output_type != "unknown" { + return Err(Box::new(std::io::Error::new( + std::io::ErrorKind::InvalidData, + format!( + "Output #{} type mismatch: expected '{}', but got '{}'", + idx + 1, + output_type, + parsed_type + ), + )) as Box); + } + + outputs.push(output); + } + + Ok(WasmTaskConfig { + task_name: config_name, + input_groups, + processor, + outputs, + }) + } +} diff --git a/src/runtime/task/yaml_keys.rs b/src/runtime/task/yaml_keys.rs new file mode 100644 index 00000000..64edb04e --- /dev/null +++ b/src/runtime/task/yaml_keys.rs @@ -0,0 +1,59 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// YAML Configuration Keys - YAML configuration key name constants +// +// Defines all key name constants used in YAML configuration files + +/// Configuration type key name +/// +/// Used to specify configuration type, supported values: +/// - "processor": Processor type configuration +/// - "python": python runtime configuration +/// - "source": Source type configuration (future support) +/// - "sink": Sink type configuration (future support) +pub const TYPE: &str = "type"; + +/// Task name key name +/// +/// Used to specify task name +pub const NAME: &str = "name"; + +/// Input groups key name +/// +/// Used to specify input group configuration list +pub const INPUT_GROUPS: &str = "input-groups"; + +/// Input source key name (backward compatible) +/// +/// Used to specify input source configuration list (backward compatible, equivalent to input-groups) +pub const INPUTS: &str = "inputs"; + +/// Output key name +/// +/// Used to specify output sink configuration list +pub const OUTPUTS: &str = "outputs"; + +/// Configuration type value constants +pub mod type_values { + /// Processor configuration type value + pub const PROCESSOR: &str = "processor"; + + /// python runtime configuration type value + pub const PYTHON: &str = "python"; + + /// Source configuration type value (future support) + pub const SOURCE: &str = "source"; + + /// Sink configuration type value (future support) + pub const SINK: &str = "sink"; +} diff --git a/src/runtime/taskexecutor/init_context.rs b/src/runtime/taskexecutor/init_context.rs new file mode 100644 index 00000000..272eadc5 --- /dev/null +++ b/src/runtime/taskexecutor/init_context.rs @@ -0,0 +1,53 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Init Context - Initialization context +// +// Provides various resources needed for task initialization, including state storage, task storage, thread pool, etc. + +use crate::runtime::processor::wasm::thread_pool::{TaskThreadPool, ThreadGroup}; +use crate::storage::state_backend::StateStorageServer; +use crate::storage::task::TaskStorage; +use std::sync::{Arc, Mutex}; + +#[derive(Clone)] +pub struct InitContext { + pub state_storage_server: Arc, + pub task_storage: Arc, + pub thread_pool: Arc, + pub thread_group_registry: Arc>>, +} + +impl InitContext { + pub fn new( + state_storage_server: Arc, + task_storage: Arc, + thread_pool: Arc, + ) -> Self { + Self { + state_storage_server, + task_storage, + thread_pool, + thread_group_registry: Arc::new(Mutex::new(Vec::new())), + } + } + + pub fn register_thread_group(&self, thread_group: ThreadGroup) { + let mut registry = self.thread_group_registry.lock().unwrap(); + registry.push(thread_group); + } + + pub fn take_thread_groups(&self) -> Vec { + let mut registry = self.thread_group_registry.lock().unwrap(); + std::mem::take(&mut *registry) + } +} diff --git a/src/runtime/taskexecutor/mod.rs b/src/runtime/taskexecutor/mod.rs new file mode 100644 index 00000000..466f000e --- /dev/null +++ b/src/runtime/taskexecutor/mod.rs @@ -0,0 +1,19 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// TaskExecutor module + +mod init_context; +pub mod task_manager; + +pub use init_context::InitContext; +pub use task_manager::TaskManager; diff --git a/src/runtime/taskexecutor/task_manager.rs b/src/runtime/taskexecutor/task_manager.rs new file mode 100644 index 00000000..562a2e2d --- /dev/null +++ b/src/runtime/taskexecutor/task_manager.rs @@ -0,0 +1,322 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::config::GlobalConfig; +use crate::runtime::common::ComponentState; +use crate::runtime::processor::wasm::thread_pool::{GlobalTaskThreadPool, TaskThreadPool}; +use crate::runtime::task::{TaskBuilder, TaskLifecycle}; +use crate::runtime::taskexecutor::init_context::InitContext; +use crate::storage::state_backend::StateStorageServer; +use crate::storage::task::{ + FunctionInfo, StoredTaskInfo, TaskModuleBytes, TaskStorage, TaskStorageFactory, +}; + +use anyhow::{Context, Result, anyhow}; +use parking_lot::RwLock; +use std::collections::HashMap; +use std::sync::{Arc, OnceLock}; +use std::time::{SystemTime, UNIX_EPOCH}; + +pub struct TaskManager { + tasks: Arc>>>>>, + state_storage_server: Arc, + task_storage: Arc, + thread_pool: Arc, +} + +static GLOBAL_INSTANCE: OnceLock> = OnceLock::new(); + +impl TaskManager { + pub fn init(config: &GlobalConfig) -> Result<()> { + if GLOBAL_INSTANCE.get().is_some() { + return Err(anyhow!("TaskManager singleton already initialized")); + } + + let _ = GlobalTaskThreadPool::get_or_create(); + + let manager = + Arc::new(Self::init_internal(config).context("Failed to construct TaskManager")?); + manager + .recover_tasks_from_storage() + .context("Failed to recover persisted tasks")?; + + GLOBAL_INSTANCE + .set(manager) + .map_err(|_| anyhow!("Concurrency error during TaskManager singleton assignment"))?; + + Ok(()) + } + + pub fn get() -> Result> { + GLOBAL_INSTANCE + .get() + .cloned() + .ok_or_else(|| anyhow!("TaskManager not initialized. 
Call init() first.")) + } + + fn init_internal(config: &GlobalConfig) -> Result { + let thread_pool = GlobalTaskThreadPool::get_or_create(); + + let state_storage_server = Arc::new( + StateStorageServer::new(config.state_storage.clone()) + .map_err(|e| anyhow!("Failed to create state storage server: {}", e))?, + ); + + let task_storage = Arc::from(TaskStorageFactory::create_storage(&config.task_storage)?); + + Ok(Self { + tasks: Arc::new(RwLock::new(HashMap::new())), + state_storage_server, + task_storage, + thread_pool, + }) + } +} + +impl TaskManager { + pub fn register_task(&self, config_bytes: &[u8], module_bytes: &[u8]) -> Result<()> { + let task = TaskBuilder::from_yaml_config(config_bytes, module_bytes) + .map_err(|e| anyhow!("Failed to build task: {}", e))?; + let info = task.get_function_info(); + let task_info = StoredTaskInfo { + name: info.name, + task_type: info.task_type, + module_bytes: Some(TaskModuleBytes::Wasm(module_bytes.to_vec())), + config_bytes: config_bytes.to_vec(), + state: ComponentState::Initialized, + created_at: SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_secs(), + checkpoint_id: None, + }; + self.register_task_internal(task, Some(task_info)) + } + + pub fn register_python_task( + &self, + config_bytes: &[u8], + modules: &[(String, Vec)], + ) -> Result<()> { + #[cfg(feature = "python")] + { + let task = TaskBuilder::from_python_config(config_bytes, modules) + .map_err(|e| anyhow!("Failed to build Python task: {}", e))?; + let (class_name, module_name, module_bytes) = match modules.first() { + Some((name, bytes)) => (name.clone(), name.clone(), Some(bytes.clone())), + None => (String::new(), String::new(), None), + }; + let info = task.get_function_info(); + let task_info = StoredTaskInfo { + name: info.name, + task_type: info.task_type, + module_bytes: Some(TaskModuleBytes::Python { + class_name, + module: module_name, + bytes: module_bytes, + }), + config_bytes: config_bytes.to_vec(), + state: 
ComponentState::Initialized, + created_at: SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_secs(), + checkpoint_id: None, + }; + self.register_task_internal(task, Some(task_info)) + } + #[cfg(not(feature = "python"))] + { + let _ = (config_bytes, modules); + Err(anyhow!("Python feature disabled in this build")) + } + } + + pub fn start_task(&self, name: &str) -> Result<()> { + let task = self.get_task_handle(name)?; + task.write() + .start() + .map_err(|e| anyhow!("Failed to start task: {}", e)) + } + + pub fn stop_task(&self, name: &str) -> Result<()> { + let task = self.get_task_handle(name)?; + task.write() + .stop() + .map_err(|e| anyhow!("Failed to stop task: {}", e)) + } + + pub fn close_task(&self, name: &str) -> Result<()> { + let task = self.get_task_handle(name)?; + task.write() + .close() + .map_err(|e| anyhow!("Failed to close task: {}", e)) + } + + pub fn remove_task(&self, name: &str) -> Result<()> { + let task_handle = self.get_task_handle(name)?; + + { + let mut handle = task_handle.write(); + if !handle.get_state().is_closed() { + handle + .close() + .map_err(|e| anyhow!("Failed to close task before removal: {}", e))?; + } + } + + self.tasks.write().remove(name); + self.task_storage + .delete_task(name) + .context("Failed to remove task from persistent storage")?; + + log::info!(target: "task_manager", "Task '{}' successfully purged", name); + Ok(()) + } + + pub fn take_checkpoint(&self, name: &str, checkpoint_id: u64) -> Result<()> { + let task = self.get_task_handle(name)?; + task.write() + .take_checkpoint(checkpoint_id) + .map_err(|e| anyhow!("Checkpoint failed: {}", e)) + } +} + +impl TaskManager { + pub fn list_all_functions(&self) -> Vec { + let tasks = self.tasks.read(); + tasks + .iter() + .map(|(_, task_arc)| task_arc.read().get_function_info()) + .collect() + } + + pub fn get_task_status(&self, name: &str) -> Result { + Ok(self.get_task_handle(name)?.read().get_state()) + } + + pub fn state_storage_server(&self) -> Arc 
{ + Arc::clone(&self.state_storage_server) + } + + pub fn task_storage(&self) -> Arc { + Arc::clone(&self.task_storage) + } + + pub fn thread_pool(&self) -> Arc { + Arc::clone(&self.thread_pool) + } +} + +impl TaskManager { + fn register_task_internal( + &self, + task: Box, + task_info_to_store: Option, + ) -> Result<()> { + let task_name = task.get_name().to_string(); + + if self.tasks.read().contains_key(&task_name) { + return Err(anyhow!("Task uniqueness violation: '{}'", task_name)); + } + + let task_arc = Arc::new(RwLock::new(task)); + + { + let mut registry = self.tasks.write(); + registry.insert(task_name.clone(), Arc::clone(&task_arc)); + } + + let init_context = InitContext::new( + self.state_storage_server.clone(), + self.task_storage.clone(), + self.thread_pool.clone(), + ); + + let mut handle = task_arc.write(); + handle + .init_with_context(&init_context) + .map_err(|e| anyhow!("Failed to init task '{}': {}", task_name, e))?; + handle + .start() + .map_err(|e| anyhow!("Failed to start task '{}': {}", task_name, e))?; + + if let Some(ref info) = task_info_to_store { + self.task_storage + .create_task(info) + .context("Failed to persist task to storage")?; + } + + log::info!( + target: "task_manager", + "Task '{}' initialized and started", + task_name + ); + Ok(()) + } + + fn recover_tasks_from_storage(&self) -> Result<()> { + let stored_tasks = self.task_storage.list_all_tasks()?; + + for stored in stored_tasks { + if let Err(e) = self.recover_one_task(&stored) { + log::error!( + target: "task_manager", + "Recovery failed for {}: {:?}", + stored.name, + e + ); + } + } + Ok(()) + } + + fn recover_one_task(&self, stored: &StoredTaskInfo) -> Result<()> { + if self.tasks.read().contains_key(&stored.name) { + return Ok(()); + } + + let task = match &stored.module_bytes { + None => TaskBuilder::from_yaml_config(&stored.config_bytes, &[]), + Some(TaskModuleBytes::Wasm(bytes)) => { + TaskBuilder::from_yaml_config(&stored.config_bytes, bytes) + } + 
Some(TaskModuleBytes::Python { + class_name: _, + module, + bytes: py_bytes, + }) => { + #[cfg(feature = "python")] + { + let modules = [(module.clone(), py_bytes.clone().unwrap_or_default())]; + TaskBuilder::from_python_config(&stored.config_bytes, &modules) + } + #[cfg(not(feature = "python"))] + { + let _ = (module, py_bytes); + return Err(anyhow!("Python task recovery skipped: feature disabled")); + } + } + } + .map_err(|e| anyhow!("Failed to rebuild task from storage: {}", e))?; + + self.register_task_internal(task, None) + } + + fn get_task_handle(&self, name: &str) -> Result>>> { + self.tasks + .read() + .get(name) + .cloned() + .ok_or_else(|| anyhow!("Task '{}' not found in registry", name)) + } +} diff --git a/src/server/handler.rs b/src/server/handler.rs new file mode 100644 index 00000000..4721a5a1 --- /dev/null +++ b/src/server/handler.rs @@ -0,0 +1,399 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::sync::Arc; +use std::time::Instant; + +use arrow_ipc::writer::StreamWriter; +use log::{error, info}; +use tonic::{Request, Response as TonicResponse, Status}; + +use protocol::service::FunctionInfo as ProtoFunctionInfo; +use protocol::service::{ + CreateFunctionRequest, CreatePythonFunctionRequest, DropFunctionRequest, Response, + ShowFunctionsRequest, ShowFunctionsResponse, SqlRequest, StartFunctionRequest, StatusCode, + StopFunctionRequest, function_stream_service_server::FunctionStreamService, +}; + +use crate::coordinator::Coordinator; +use crate::coordinator::{ + CreateFunction, CreatePythonFunction, DataSet, DropFunction, ShowFunctions, + ShowFunctionsResult, StartFunction, Statement, StopFunction, +}; +use crate::sql::SqlParser; + +pub struct FunctionStreamServiceImpl { + coordinator: Arc, +} + +impl FunctionStreamServiceImpl { + pub fn new(coordinator: Arc) -> Self { + Self { coordinator } + } + + fn build_response(status_code: StatusCode, message: String, data: Option>) -> Response { + Response { + status_code: status_code as i32, + message, + data, + } + } + + fn data_set_to_ipc_bytes(ds: &dyn DataSet) -> Option> { + let batch = ds.to_record_batch(); + let mut buf = Vec::new(); + { + let mut writer = StreamWriter::try_new(&mut buf, &batch.schema()).ok()?; + writer.write(&batch).ok()?; + writer.finish().ok()?; + } + Some(buf) + } +} + +#[tonic::async_trait] +impl FunctionStreamService for FunctionStreamServiceImpl { + async fn execute_sql( + &self, + request: Request, + ) -> Result, Status> { + let start_time = Instant::now(); + let req = request.into_inner(); + + let parse_start = Instant::now(); + let stmt = match SqlParser::parse(&req.sql) { + Ok(stmt) => { + log::debug!("SQL parsed in {}ms", parse_start.elapsed().as_millis()); + stmt + } + Err(e) => { + return Ok(TonicResponse::new(Self::build_response( + StatusCode::BadRequest, + format!("Parse error: {}", e), + None, + ))); + } + }; + + let exec_start = Instant::now(); + let result = 
self.coordinator.execute(stmt.as_ref()); + log::debug!( + "Coordinator execution finished in {}ms", + exec_start.elapsed().as_millis() + ); + + let status_code = if result.success { + StatusCode::Ok + } else { + error!("Execution failed: {}", result.message); + StatusCode::InternalServerError + }; + + log::debug!( + "Total SQL request cost: {}ms", + start_time.elapsed().as_millis() + ); + + Ok(TonicResponse::new(Self::build_response( + status_code, + result.message, + result + .data + .as_ref() + .and_then(|ds| Self::data_set_to_ipc_bytes(ds.as_ref())), + ))) + } + + async fn create_function( + &self, + request: Request, + ) -> Result, Status> { + let start_time = Instant::now(); + let req = request.into_inner(); + info!( + "Received CreateFunction request. Config size: {}, Function size: {}", + req.config_bytes.len(), + req.function_bytes.len() + ); + + let config_bytes = if !req.config_bytes.is_empty() { + Some(req.config_bytes) + } else { + None + }; + + let stmt = CreateFunction::from_bytes(req.function_bytes, config_bytes); + + let exec_start = Instant::now(); + let result = self.coordinator.execute(&stmt as &dyn Statement); + info!( + "Coordinator execution finished in {}ms", + exec_start.elapsed().as_millis() + ); + + let status_code = if result.success { + StatusCode::Created + } else { + error!("CreateFunction failed: {}", result.message); + StatusCode::InternalServerError + }; + + info!( + "Total CreateFunction request cost: {}ms", + start_time.elapsed().as_millis() + ); + + Ok(TonicResponse::new(Self::build_response( + status_code, + result.message, + result + .data + .as_ref() + .and_then(|ds| Self::data_set_to_ipc_bytes(ds.as_ref())), + ))) + } + + async fn create_python_function( + &self, + request: Request, + ) -> Result, Status> { + let start_time = Instant::now(); + let req = request.into_inner(); + info!( + "Received CreatePythonFunction request. 
Class name: {}, Modules: {}", + req.class_name, + req.modules.len() + ); + + // Convert proto modules to PythonModule + let modules: Vec = req + .modules + .into_iter() + .map(|m| crate::coordinator::PythonModule { + name: m.module_name, + bytes: m.module_bytes, + }) + .collect(); + + if modules.is_empty() { + return Ok(TonicResponse::new(Self::build_response( + StatusCode::BadRequest, + "At least one module is required".to_string(), + None, + ))); + } + + let stmt = CreatePythonFunction::new(req.class_name, modules, req.config_content); + + let exec_start = Instant::now(); + let result = self.coordinator.execute(&stmt as &dyn Statement); + info!( + "Coordinator execution finished in {}ms", + exec_start.elapsed().as_millis() + ); + + let status_code = if result.success { + StatusCode::Created + } else { + error!("CreatePythonFunction failed: {}", result.message); + StatusCode::InternalServerError + }; + + info!( + "Total CreatePythonFunction request cost: {}ms", + start_time.elapsed().as_millis() + ); + + Ok(TonicResponse::new(Self::build_response( + status_code, + result.message, + result + .data + .as_ref() + .and_then(|ds| Self::data_set_to_ipc_bytes(ds.as_ref())), + ))) + } + + async fn drop_function( + &self, + request: Request, + ) -> Result, Status> { + let start_time = Instant::now(); + let req = request.into_inner(); + info!( + "Received DropFunction request: function_name={}", + req.function_name + ); + + let stmt = DropFunction::new(req.function_name); + let exec_start = Instant::now(); + let result = self.coordinator.execute(&stmt as &dyn Statement); + info!( + "Coordinator execution finished in {}ms", + exec_start.elapsed().as_millis() + ); + + let status_code = if result.success { + StatusCode::Ok + } else { + error!("DropFunction failed: {}", result.message); + StatusCode::InternalServerError + }; + + info!( + "Total DropFunction request cost: {}ms", + start_time.elapsed().as_millis() + ); + + Ok(TonicResponse::new(Self::build_response( + 
status_code, + result.message, + None, + ))) + } + + async fn show_functions( + &self, + request: Request, + ) -> Result, Status> { + let start_time = Instant::now(); + let _req = request.into_inner(); + info!("Received ShowFunctions request"); + + let stmt = ShowFunctions::new(); + let exec_start = Instant::now(); + let result = self.coordinator.execute(&stmt as &dyn Statement); + info!( + "Coordinator execution finished in {}ms", + exec_start.elapsed().as_millis() + ); + + let (status_code, message) = if result.success { + (StatusCode::Ok as i32, result.message) + } else { + error!("ShowFunctions failed: {}", result.message); + (StatusCode::InternalServerError as i32, result.message) + }; + + let functions: Vec = result + .data + .as_ref() + .and_then(|arc_ds| { + (arc_ds.as_ref() as &dyn std::any::Any).downcast_ref::() + }) + .map(|sfr| { + sfr.functions() + .iter() + .map(|f| ProtoFunctionInfo { + name: f.name.clone(), + task_type: f.task_type.clone(), + status: f.status.clone(), + }) + .collect() + }) + .unwrap_or_default(); + + info!( + "Total ShowFunctions request cost: {}ms, count={}", + start_time.elapsed().as_millis(), + functions.len() + ); + + Ok(TonicResponse::new(ShowFunctionsResponse { + status_code, + message, + functions, + })) + } + + async fn start_function( + &self, + request: Request, + ) -> Result, Status> { + let start_time = Instant::now(); + let req = request.into_inner(); + info!( + "Received StartFunction request: function_name={}", + req.function_name + ); + + let stmt = StartFunction::new(req.function_name); + let exec_start = Instant::now(); + let result = self.coordinator.execute(&stmt as &dyn Statement); + info!( + "Coordinator execution finished in {}ms", + exec_start.elapsed().as_millis() + ); + + let status_code = if result.success { + StatusCode::Ok + } else { + error!("StartFunction failed: {}", result.message); + StatusCode::InternalServerError + }; + + info!( + "Total StartFunction request cost: {}ms", + 
start_time.elapsed().as_millis() + ); + + Ok(TonicResponse::new(Self::build_response( + status_code, + result.message, + None, + ))) + } + + async fn stop_function( + &self, + request: Request, + ) -> Result, Status> { + let start_time = Instant::now(); + let req = request.into_inner(); + info!( + "Received StopFunction request: function_name={}", + req.function_name + ); + + let stmt = StopFunction::new(req.function_name); + let exec_start = Instant::now(); + let result = self.coordinator.execute(&stmt as &dyn Statement); + info!( + "Coordinator execution finished in {}ms", + exec_start.elapsed().as_millis() + ); + + let status_code = if result.success { + StatusCode::Ok + } else { + error!("StopFunction failed: {}", result.message); + StatusCode::InternalServerError + }; + + info!( + "Total StopFunction request cost: {}ms", + start_time.elapsed().as_millis() + ); + + Ok(TonicResponse::new(Self::build_response( + status_code, + result.message, + None, + ))) + } +} + +impl Default for FunctionStreamServiceImpl { + fn default() -> Self { + Self::new(Arc::new(Coordinator::new())) + } +} diff --git a/src/server/initializer.rs b/src/server/initializer.rs new file mode 100644 index 00000000..506cbf82 --- /dev/null +++ b/src/server/initializer.rs @@ -0,0 +1,159 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::config::GlobalConfig; +use anyhow::{Context, Result}; + +type InitializerFn = fn(&GlobalConfig) -> Result<()>; + +#[derive(Clone)] +struct Component { + name: &'static str, + initializer: InitializerFn, +} + +#[derive(Default)] +pub struct ComponentRegistryBuilder { + components: Vec, +} + +impl ComponentRegistryBuilder { + #[inline] + pub fn new() -> Self { + Self::with_capacity(8) + } + + #[inline] + pub fn with_capacity(capacity: usize) -> Self { + Self { + components: Vec::with_capacity(capacity), + } + } + + #[inline] + pub fn register(mut self, name: &'static str, initializer: InitializerFn) -> Self { + self.components.push(Component { name, initializer }); + self + } + + #[inline] + pub fn build(self) -> ComponentRegistry { + ComponentRegistry { + components: self.components, + } + } +} + +pub struct ComponentRegistry { + components: Vec, +} + +impl ComponentRegistry { + pub fn initialize_all(&self, config: &GlobalConfig) -> Result<()> { + if self.components.is_empty() { + log::warn!("No components registered for initialization"); + return Ok(()); + } + + log::info!("Initializing {} components...", self.components.len()); + + for (idx, component) in self.components.iter().enumerate() { + let start = std::time::Instant::now(); + log::debug!( + "[{}/{}] Initializing component: {}", + idx + 1, + self.components.len(), + component.name + ); + + (component.initializer)(config) + .with_context(|| format!("Component '{}' initialization failed", component.name))?; + + let elapsed = start.elapsed(); + log::debug!( + "[{}/{}] Component '{}' initialized successfully in {:?}", + idx + 1, + self.components.len(), + component.name, + elapsed + ); + } + + log::info!( + "All {} components initialized successfully", + self.components.len() + ); + Ok(()) + } + + #[inline] + pub fn len(&self) -> usize { + self.components.len() + } + + #[inline] + pub fn is_empty(&self) -> bool { + self.components.is_empty() + } +} + +fn initialize_wasm_cache(config: 
&GlobalConfig) -> Result<()> { + crate::runtime::processor::wasm::wasm_cache::set_cache_config( + crate::runtime::processor::wasm::wasm_cache::WasmCacheConfig { + enabled: config.wasm.enable_cache, + cache_dir: crate::config::paths::resolve_path(&config.wasm.cache_dir), + max_size: config.wasm.max_cache_size, + }, + ); + log::info!( + "WASM cache configuration: enabled={}, dir={}, max_size={} bytes", + config.wasm.enable_cache, + config.wasm.cache_dir, + config.wasm.max_cache_size + ); + Ok(()) +} + +fn initialize_task_manager(config: &GlobalConfig) -> Result<()> { + crate::runtime::taskexecutor::TaskManager::init(config) + .context("TaskManager initialization failed")?; + Ok(()) +} + +#[cfg(feature = "python")] +fn initialize_python_service(config: &GlobalConfig) -> Result<()> { + crate::runtime::processor::python::PythonService::initialize(config) + .context("Python Runtime initialization failed")?; + Ok(()) +} + +fn initialize_coordinator(_config: &GlobalConfig) -> Result<()> { + crate::runtime::taskexecutor::TaskManager::get() + .context("Coordinator requires TaskManager to be initialized first")?; + log::info!("Coordinator verified and ready"); + Ok(()) +} + +pub fn register_components() -> ComponentRegistry { + let mut builder = ComponentRegistryBuilder::new() + .register("WasmCache", initialize_wasm_cache) + .register("TaskManager", initialize_task_manager); + + #[cfg(feature = "python")] + { + builder = builder.register("PythonService", initialize_python_service); + } + + builder + .register("Coordinator", initialize_coordinator) + .build() +} diff --git a/src/server/mod.rs b/src/server/mod.rs new file mode 100644 index 00000000..03254af3 --- /dev/null +++ b/src/server/mod.rs @@ -0,0 +1,21 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Server module for function-stream + +mod handler; +mod initializer; +mod service; + +pub use handler::FunctionStreamServiceImpl; +pub use initializer::register_components; +pub use service::start_server_with_shutdown; diff --git a/src/server/service.rs b/src/server/service.rs new file mode 100644 index 00000000..8ffb178c --- /dev/null +++ b/src/server/service.rs @@ -0,0 +1,63 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::net::SocketAddr; +use std::sync::Arc; + +use anyhow::{Context, Result}; +use tokio::net::TcpListener; +use tokio_stream::wrappers::TcpListenerStream; +use tonic::transport::Server; + +use crate::config::GlobalConfig; +use crate::coordinator::Coordinator; +use crate::server::FunctionStreamServiceImpl; +use protocol::service::function_stream_service_server::FunctionStreamServiceServer; + +pub async fn start_server_with_shutdown( + config: &GlobalConfig, + shutdown_rx: tokio::sync::oneshot::Receiver<()>, + ready_tx: Option>, +) -> Result<()> { + let addr_str = format!("{}:{}", config.service.host, config.service.port); + let addr: SocketAddr = addr_str + .parse() + .with_context(|| format!("Invalid address format: {}", addr_str))?; + + let listener = TcpListener::bind(addr) + .await + .with_context(|| format!("Failed to bind to address: {}", addr))?; + + log::info!("gRPC server listening on {}", addr); + + if let Some(tx) = ready_tx { + let _ = tx.send(()); + } + + let coordinator = Arc::new(Coordinator::new()); + let service_impl = FunctionStreamServiceImpl::new(coordinator); + + let incoming = TcpListenerStream::new(listener); + + Server::builder() + .add_service(FunctionStreamServiceServer::new(service_impl)) + .serve_with_incoming_shutdown(incoming, async { + shutdown_rx.await.ok(); + log::info!("Shutdown signal received, stopping gRPC server..."); + }) + .await + .with_context(|| "gRPC server runtime error")?; + + log::info!("gRPC server stopped"); + + Ok(()) +} diff --git a/src/sql/grammar.pest b/src/sql/grammar.pest new file mode 100644 index 00000000..15f70dd7 --- /dev/null +++ b/src/sql/grammar.pest @@ -0,0 +1,134 @@ +// ============================================================================= +// FUNCTION SQL Grammar +// +// Using pest PEG syntax, referencing ANTLR style +// ============================================================================= + +// ============================================================================= +// 1. 
Whitespace (automatically skipped) +// ============================================================================= + +WHITESPACE = _{ " " | "\t" | "\r" | "\n" } + +// ============================================================================= +// 2. Keywords (case-insensitive) +// ============================================================================= + +kw_create = _{ C ~ R ~ E ~ A ~ T ~ E } +kw_drop = _{ D ~ R ~ O ~ P } +kw_start = _{ S ~ T ~ A ~ R ~ T } +kw_stop = _{ S ~ T ~ O ~ P } +kw_show = _{ S ~ H ~ O ~ W } +kw_with = _{ W ~ I ~ T ~ H } +kw_function = _{ F ~ U ~ N ~ C ~ T ~ I ~ O ~ N } +kw_functions = _{ F ~ U ~ N ~ C ~ T ~ I ~ O ~ N ~ S } + +// ============================================================================= +// 3. Operators & Symbols +// ============================================================================= + +LPAREN = _{ "(" } +RPAREN = _{ ")" } +COMMA = _{ "," } +EQ = _{ "=" } +SQUOTE = _{ "'" } +DQUOTE = _{ "\"" } + +// ============================================================================= +// 4. Literals +// ============================================================================= + +// String literal (single or double quotes) +string_literal = @{ + SQUOTE ~ string_inner_single ~ SQUOTE | + DQUOTE ~ string_inner_double ~ DQUOTE +} + +string_inner_single = @{ (!(SQUOTE | "\\") ~ ANY | escape_seq)* } +string_inner_double = @{ (!(DQUOTE | "\\") ~ ANY | escape_seq)* } +escape_seq = @{ "\\" ~ ANY } + +// ============================================================================= +// 5. Identifiers +// ============================================================================= + +// Task name identifier +identifier = @{ (ASCII_ALPHA | "_") ~ (ASCII_ALPHANUMERIC | "_" | "-")* } + +// ============================================================================= +// 6. 
Statements +// ============================================================================= + +// Entry rule +statement = _{ + SOI ~ ( + create_stmt | + drop_stmt | + start_stmt | + stop_stmt | + show_stmt + ) ~ EOI +} + +// CREATE FUNCTION WITH (...) +// Note: name is read from config file, not from SQL statement +create_stmt = { kw_create ~ kw_function ~ kw_with ~ properties } + +// DROP FUNCTION name +drop_stmt = { kw_drop ~ kw_function ~ identifier } + +// START FUNCTION name +start_stmt = { kw_start ~ kw_function ~ identifier } + +// STOP FUNCTION name +stop_stmt = { kw_stop ~ kw_function ~ identifier } + +// SHOW FUNCTIONS +show_stmt = { kw_show ~ kw_functions } + +// ============================================================================= +// 7. Properties +// ============================================================================= + +// Property list ('key'='value', ...) +properties = { LPAREN ~ property ~ (COMMA ~ property)* ~ RPAREN } + +// Single property 'key'='value' +property = { property_key ~ EQ ~ property_value } + +// Property key (string) +property_key = { string_literal } + +// Property value (string) +property_value = { string_literal } + +// ============================================================================= +// 8. 
Character Fragments (for case-insensitive matching) +// ============================================================================= + +A = _{ "A" | "a" } +B = _{ "B" | "b" } +C = _{ "C" | "c" } +D = _{ "D" | "d" } +E = _{ "E" | "e" } +F = _{ "F" | "f" } +G = _{ "G" | "g" } +H = _{ "H" | "h" } +I = _{ "I" | "i" } +J = _{ "J" | "j" } +K = _{ "K" | "k" } +L = _{ "L" | "l" } +M = _{ "M" | "m" } +N = _{ "N" | "n" } +O = _{ "O" | "o" } +P = _{ "P" | "p" } +Q = _{ "Q" | "q" } +R = _{ "R" | "r" } +S = _{ "S" | "s" } +T = _{ "T" | "t" } +U = _{ "U" | "u" } +V = _{ "V" | "v" } +W = _{ "W" | "w" } +X = _{ "X" | "x" } +Y = _{ "Y" | "y" } +Z = _{ "Z" | "z" } diff --git a/src/sql/mod.rs b/src/sql/mod.rs new file mode 100644 index 00000000..ed3c2e30 --- /dev/null +++ b/src/sql/mod.rs @@ -0,0 +1,15 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod parser; + +pub use parser::SqlParser; diff --git a/src/sql/parser/mod.rs b/src/sql/parser/mod.rs new file mode 100644 index 00000000..11f4b18e --- /dev/null +++ b/src/sql/parser/mod.rs @@ -0,0 +1,42 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +mod sql_parser; + +pub use sql_parser::SqlParser; + +#[derive(Debug)] +pub struct ParseError { + pub message: String, +} + +impl std::fmt::Display for ParseError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "Parse error: {}", self.message) + } +} + +impl std::error::Error for ParseError {} + +impl From for ParseError { + fn from(message: String) -> Self { + ParseError { message } + } +} + +impl ParseError { + pub fn new(message: impl Into) -> Self { + Self { + message: message.into(), + } + } +} diff --git a/src/sql/parser/sql_parser.rs b/src/sql/parser/sql_parser.rs new file mode 100644 index 00000000..90cd51c1 --- /dev/null +++ b/src/sql/parser/sql_parser.rs @@ -0,0 +1,249 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use pest::Parser; +use pest_derive::Parser; + +use super::ParseError; +use crate::coordinator::{ + CreateFunction, DropFunction, ShowFunctions, StartFunction, Statement, StopFunction, +}; +use std::collections::HashMap; + +#[derive(Parser)] +#[grammar = "src/sql/grammar.pest"] +struct Grammar; + +#[derive(Debug, Default)] +pub struct SqlParser; + +impl SqlParser { + pub fn parse(sql: &str) -> Result, ParseError> { + let pairs = Grammar::parse(Rule::statement, sql) + .map_err(|e| ParseError::new(format!("Parse error: {}", e)))?; + + for pair in pairs { + return match pair.as_rule() { + Rule::create_stmt => { + handle_create_stmt(pair).map(|stmt| stmt as Box) + } + Rule::drop_stmt => handle_drop_stmt(pair).map(|stmt| stmt as Box), + Rule::start_stmt => handle_start_stmt(pair).map(|stmt| stmt as Box), + Rule::stop_stmt => handle_stop_stmt(pair).map(|stmt| stmt as Box), + Rule::show_stmt => handle_show_stmt(pair).map(|stmt| stmt as Box), + _ => continue, + }; + } + + Err(ParseError::new("Unknown statement type")) + } +} + +fn handle_create_stmt( + pair: pest::iterators::Pair, +) -> Result, ParseError> { + let mut inner = pair.into_inner(); + // Note: name is read from config file, not from SQL statement + // Pass empty string here, name will be read from config file later + let properties = inner + .next() + .map(parse_properties) + .ok_or_else(|| ParseError::new("Missing WITH clause"))?; + + Ok(Box::new( + CreateFunction::from_properties(properties).map_err(|e| ParseError::from(e))?, + )) +} + +fn handle_drop_stmt(pair: pest::iterators::Pair) -> Result, ParseError> { + let mut inner = pair.into_inner(); + let name = inner.next().map(extract_string).unwrap_or_default(); + Ok(Box::new(DropFunction::new(name))) +} + +fn handle_start_stmt(pair: pest::iterators::Pair) -> Result, ParseError> { + let mut inner = pair.into_inner(); + let name = inner.next().map(extract_string).unwrap_or_default(); + Ok(Box::new(StartFunction::new(name))) +} + +fn handle_stop_stmt(pair: 
pest::iterators::Pair) -> Result, ParseError> { + let mut inner = pair.into_inner(); + let name = inner.next().map(extract_string).unwrap_or_default(); + Ok(Box::new(StopFunction::new(name))) +} + +fn handle_show_stmt(_pair: pest::iterators::Pair) -> Result, ParseError> { + Ok(Box::new(ShowFunctions::new())) +} + +fn extract_string(pair: pest::iterators::Pair) -> String { + match pair.as_rule() { + Rule::string_literal => { + let s = pair.as_str(); + if (s.starts_with('\'') && s.ends_with('\'')) + || (s.starts_with('"') && s.ends_with('"')) + { + unescape_string(&s[1..s.len() - 1]) + } else { + unescape_string(s) + } + } + Rule::identifier => pair.as_str().to_string(), + _ => pair.as_str().to_string(), + } +} + +fn unescape_string(s: &str) -> String { + let mut result = String::with_capacity(s.len()); + let mut chars = s.chars().peekable(); + + while let Some(ch) = chars.next() { + if ch == '\\' { + if let Some(&next) = chars.peek() { + chars.next(); + match next { + 'n' => result.push('\n'), + 't' => result.push('\t'), + 'r' => result.push('\r'), + '\\' => result.push('\\'), + '\'' => result.push('\''), + '"' => result.push('"'), + _ => { + result.push('\\'); + result.push(next); + } + } + } else { + result.push(ch); + } + } else { + result.push(ch); + } + } + + result +} + +fn parse_properties(pair: pest::iterators::Pair) -> HashMap { + let mut properties = HashMap::new(); + + for prop in pair.into_inner() { + if prop.as_rule() == Rule::property { + let mut inner = prop.into_inner(); + if let (Some(key_pair), Some(val_pair)) = (inner.next(), inner.next()) { + let key = key_pair + .into_inner() + .next() + .map(extract_string) + .unwrap_or_default(); + let value = val_pair + .into_inner() + .next() + .map(extract_string) + .unwrap_or_default(); + properties.insert(key, value); + } + } + } + + properties +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_create_function() { + let sql = + "CREATE FUNCTION WITH ('function_path'='./test.wasm', 
'config_path'='./config.yml')"; + let stmt = SqlParser::parse(sql).unwrap(); + } + + #[test] + fn test_create_function_minimal() { + let sql = "CREATE FUNCTION WITH ('function_path'='./processor.wasm')"; + let stmt = SqlParser::parse(sql).unwrap(); + } + + // Note: SQL only supports Path mode, not Bytes mode + // Bytes mode is only for gRPC requests + + #[test] + fn test_drop_function() { + let sql = "DROP FUNCTION my_task"; + let stmt = SqlParser::parse(sql).unwrap(); + } + + #[test] + fn test_start_function() { + let sql = "START FUNCTION my_task"; + let stmt = SqlParser::parse(sql).unwrap(); + } + + #[test] + fn test_stop_function() { + let sql = "STOP FUNCTION my_task"; + let stmt = SqlParser::parse(sql).unwrap(); + } + + #[test] + fn test_show_functions() { + let sql = "SHOW FUNCTIONS"; + let stmt = SqlParser::parse(sql).unwrap(); + } + + #[test] + fn test_case_insensitive_keywords() { + let sql1 = "create function with ('function_path'='./test.wasm')"; + let stmt1 = SqlParser::parse(sql1).unwrap(); + + let sql2 = "Create Function With ('Function_Path'='./test.wasm')"; + let stmt2 = SqlParser::parse(sql2).unwrap(); + + let sql3 = "show functions"; + let stmt3 = SqlParser::parse(sql3).unwrap(); + + let sql4 = "start function my_task"; + let stmt4 = SqlParser::parse(sql4).unwrap(); + } + + #[test] + fn test_case_insensitive_property_keys() { + let sql1 = + "CREATE FUNCTION WITH ('function_path'='./test.wasm', 'config_path'='./config.yml')"; + let stmt1 = SqlParser::parse(sql1).unwrap(); + + let sql2 = + "CREATE FUNCTION WITH ('Function_Path'='./test.wasm', 'Config_Path'='./config.yml')"; + let stmt2 = SqlParser::parse(sql2).unwrap(); + + let sql3 = + "CREATE FUNCTION WITH ('FUNCTION_PATH'='./test.wasm', 'CONFIG_PATH'='./config.yml')"; + let stmt3 = SqlParser::parse(sql3).unwrap(); + + // Note: SQL only supports Path mode (function_path, config_path) + // Bytes mode (function, config) is only for gRPC requests + } + + #[test] + fn test_with_extra_properties() { + 
let sql = r#"CREATE FUNCTION WITH ( + 'function_path'='./test.wasm', + 'config_path'='./config.yml', + 'parallelism'='4', + 'memory-limit'='256mb' + )"#; + let stmt = SqlParser::parse(sql).unwrap(); + } +} diff --git a/src/storage/mod.rs b/src/storage/mod.rs new file mode 100644 index 00000000..a4898619 --- /dev/null +++ b/src/storage/mod.rs @@ -0,0 +1,14 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod state_backend; +pub mod task; diff --git a/src/storage/state_backend/error.rs b/src/storage/state_backend/error.rs new file mode 100644 index 00000000..530327fa --- /dev/null +++ b/src/storage/state_backend/error.rs @@ -0,0 +1,37 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +/// State backend error +#[derive(Debug, Clone)] +pub enum BackendError { + /// Key not found + KeyNotFound(String), + /// IO error + IoError(String), + /// Serialization error + SerializationError(String), + /// Other error + Other(String), +} + +impl std::fmt::Display for BackendError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + BackendError::KeyNotFound(key) => write!(f, "Key not found: {}", key), + BackendError::IoError(msg) => write!(f, "IO error: {}", msg), + BackendError::SerializationError(msg) => write!(f, "Serialization error: {}", msg), + BackendError::Other(msg) => write!(f, "Error: {}", msg), + } + } +} + +impl std::error::Error for BackendError {} diff --git a/src/storage/state_backend/factory.rs b/src/storage/state_backend/factory.rs new file mode 100644 index 00000000..5dc38632 --- /dev/null +++ b/src/storage/state_backend/factory.rs @@ -0,0 +1,73 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::storage::state_backend::error::BackendError; +use crate::storage::state_backend::store::StateStore; +use std::path::Path; +use std::sync::Arc; + +/// State store factory interface +/// +/// All state store factories should implement this interface +pub trait StateStoreFactory: Send + Sync { + /// Create a new state store instance + /// + /// # Arguments + /// - `column_family`: optional column family name (some implementations may not support) + /// + /// # Returns + /// - `Ok(Box)`: successfully created + /// - `Err(BackendError)`: creation failed + fn new_state_store( + &self, + column_family: Option, + ) -> Result, BackendError>; +} + +/// Factory type enumeration +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum FactoryType { + /// Memory factory + Memory, + /// RocksDB factory + RocksDB, +} + +pub fn get_factory_for_task>( + factory_type: FactoryType, + task_name: String, + created_at: u64, + base_dir: Option

, + rocksdb_config: Option, +) -> Result, BackendError> { + match factory_type { + FactoryType::Memory => { + Ok(crate::storage::state_backend::memory::MemoryStateStoreFactory::default_factory()) + } + FactoryType::RocksDB => { + let base_dir = base_dir.ok_or_else(|| { + BackendError::Other("base_dir is required for RocksDB factory".to_string()) + })?; + + let db_path = base_dir + .as_ref() + .join(format!("{}-{}", task_name, created_at)); + + let config = rocksdb_config.unwrap_or_default(); + let factory = crate::storage::state_backend::rocksdb::RocksDBStateStoreFactory::new( + db_path, config, + )?; + + Ok(Arc::new(factory)) + } + } +} diff --git a/src/storage/state_backend/key_builder.rs b/src/storage/state_backend/key_builder.rs new file mode 100644 index 00000000..b65af6ed --- /dev/null +++ b/src/storage/state_backend/key_builder.rs @@ -0,0 +1,88 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +/// Build complex key +/// +/// Format: keyGroup | key | namespace | userKey +pub fn build_key(key_group: &[u8], key: &[u8], namespace: &[u8], user_key: &[u8]) -> Vec { + let total_len = key_group.len() + key.len() + namespace.len() + user_key.len(); + let mut result = Vec::with_capacity(total_len); + + result.extend_from_slice(key_group); + result.extend_from_slice(key); + result.extend_from_slice(namespace); + result.extend_from_slice(user_key); + + result +} + +/// Increment key (for range deletion) +/// +/// Increment the last byte of the key by 1, carrying forward if overflow +/// Used to create the upper bound for range deletion +pub fn increment_key(key: &[u8]) -> Vec { + if key.is_empty() { + return vec![0]; + } + + let mut result = key.to_vec(); + + for i in (0..result.len()).rev() { + if result[i] < 0xFF { + result[i] += 1; + return result; + } else { + result[i] = 0; + } + } + + result.push(0); + result +} + +/// Check if key is all 0xFF +pub fn is_all_0xff(key: &[u8]) -> bool { + !key.is_empty() && key.iter().all(|&b| b == 0xFF) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_build_key() { + let key = build_key(b"group", b"key", b"namespace", b"user"); + assert!(key.len() > 0); + } + + #[test] + fn test_increment_key() { + let key1 = vec![0x01, 0x02, 0x03]; + let inc1 = increment_key(&key1); + assert_eq!(inc1, vec![0x01, 0x02, 0x04]); + + let key2 = vec![0x01, 0x02, 0xFF]; + let inc2 = increment_key(&key2); + assert_eq!(inc2, vec![0x01, 0x03, 0x00]); + + let key3 = vec![0xFF, 0xFF]; + let inc3 = increment_key(&key3); + assert_eq!(inc3, vec![0x00, 0x00, 0x00]); + } + + #[test] + fn test_is_all_0xff() { + assert!(is_all_0xff(&[0xFF, 0xFF])); + assert!(!is_all_0xff(&[0xFF, 0xFE])); + assert!(!is_all_0xff(&[])); + } +} diff --git a/src/storage/state_backend/memory/factory.rs b/src/storage/state_backend/memory/factory.rs new file mode 100644 index 00000000..b62bd444 --- /dev/null +++ b/src/storage/state_backend/memory/factory.rs @@ 
-0,0 +1,49 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::store::MemoryStateStore; +use crate::storage::state_backend::error::BackendError; +use crate::storage::state_backend::factory::StateStoreFactory; +use std::sync::{Arc, Mutex}; + +pub struct MemoryStateStoreFactory {} + +impl MemoryStateStoreFactory { + pub fn new() -> Self { + Self {} + } + + pub fn default_factory() -> Arc { + static FACTORY: Mutex>> = Mutex::new(None); + + let mut factory = FACTORY.lock().unwrap(); + if factory.is_none() { + *factory = Some(Arc::new(MemoryStateStoreFactory::new())); + } + factory.as_ref().unwrap().clone() + } +} + +impl Default for MemoryStateStoreFactory { + fn default() -> Self { + Self::new() + } +} + +impl StateStoreFactory for MemoryStateStoreFactory { + fn new_state_store( + &self, + _column_family: Option, + ) -> Result, BackendError> { + Ok(Box::new(MemoryStateStore::new())) + } +} diff --git a/src/storage/state_backend/memory/mod.rs b/src/storage/state_backend/memory/mod.rs new file mode 100644 index 00000000..d4f8b375 --- /dev/null +++ b/src/storage/state_backend/memory/mod.rs @@ -0,0 +1,16 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod factory; +pub mod store; + +pub use factory::MemoryStateStoreFactory; diff --git a/src/storage/state_backend/memory/store.rs b/src/storage/state_backend/memory/store.rs new file mode 100644 index 00000000..10a547ac --- /dev/null +++ b/src/storage/state_backend/memory/store.rs @@ -0,0 +1,190 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::storage::state_backend::error::BackendError; +use crate::storage::state_backend::store::{StateIterator, StateStore}; +use std::collections::HashMap; +use std::sync::{Arc, Mutex}; + +/// Memory state store +pub struct MemoryStateStore { + /// Internal storage + storage: Arc, Vec>>>, +} + +impl MemoryStateStore { + /// Create a new memory state store + pub fn new() -> Self { + Self { + storage: Arc::new(Mutex::new(HashMap::new())), + } + } +} + +impl Default for MemoryStateStore { + fn default() -> Self { + Self::new() + } +} + +impl StateStore for MemoryStateStore { + fn put_state(&self, key: Vec, value: Vec) -> Result<(), BackendError> { + let mut storage = self + .storage + .lock() + .map_err(|e| BackendError::Other(format!("Lock error: {}", e)))?; + storage.insert(key, value); + Ok(()) + } + + fn get_state(&self, key: Vec) -> Result>, BackendError> { + let storage = self + .storage + .lock() + .map_err(|e| BackendError::Other(format!("Lock error: {}", e)))?; + Ok(storage.get(&key).cloned()) + } + + fn delete_state(&self, key: Vec) -> Result<(), BackendError> { + let mut storage = self + .storage + .lock() + .map_err(|e| BackendError::Other(format!("Lock error: {}", e)))?; + storage.remove(&key); + Ok(()) + } + + fn list_states( + &self, + start_inclusive: Vec, + end_exclusive: Vec, + ) -> Result>, BackendError> { + let storage = self + .storage + .lock() + .map_err(|e| BackendError::Other(format!("Lock error: {}", e)))?; + + let mut keys: Vec> = storage + .keys() + .filter(|k| *k >= &start_inclusive && *k < &end_exclusive) + .cloned() + .collect(); + + keys.sort(); + Ok(keys) + } + + fn merge( + &self, + key_group: Vec, + key: Vec, + namespace: Vec, + user_key: Vec, + value: Vec, + ) -> Result<(), BackendError> { + let key_bytes = crate::storage::state_backend::key_builder::build_key( + &key_group, &key, &namespace, &user_key, + ); + + let existing = self.get_state(key_bytes.clone())?; + + let merged = if let Some(existing_value) = existing { + let 
mut result = existing_value; + result.push(b'\0'); + result.extend_from_slice(&value); + result + } else { + value + }; + + self.put_state(key_bytes, merged) + } + + fn delete_prefix_bytes(&self, prefix: Vec) -> Result { + let mut storage = self + .storage + .lock() + .map_err(|e| BackendError::Other(format!("Lock error: {}", e)))?; + + let keys_to_delete: Vec> = storage + .keys() + .filter(|k| k.starts_with(&prefix)) + .cloned() + .collect(); + + let count = keys_to_delete.len(); + for key in keys_to_delete { + storage.remove(&key); + } + + Ok(count) + } + + fn scan(&self, prefix: Vec) -> Result, BackendError> { + let storage = self + .storage + .lock() + .map_err(|e| BackendError::Other(format!("Lock error: {}", e)))?; + + let mut pairs = Vec::new(); + for (key, value) in storage.iter() { + if key.starts_with(&prefix) { + pairs.push((key.clone(), value.clone())); + } + } + + Ok(Box::new(MemoryStateIterator { + pairs: Arc::new(Mutex::new(pairs)), + index: Arc::new(Mutex::new(0)), + })) + } +} + +/// Memory state iterator +struct MemoryStateIterator { + pairs: Arc, Vec)>>>, + index: Arc>, +} + +impl StateIterator for MemoryStateIterator { + fn has_next(&mut self) -> Result { + let pairs = self + .pairs + .lock() + .map_err(|e| BackendError::Other(format!("Lock error: {}", e)))?; + let index = self + .index + .lock() + .map_err(|e| BackendError::Other(format!("Lock error: {}", e)))?; + + Ok(*index < pairs.len()) + } + + fn next(&mut self) -> Result, Vec)>, BackendError> { + let pairs = self + .pairs + .lock() + .map_err(|e| BackendError::Other(format!("Lock error: {}", e)))?; + let mut index = self + .index + .lock() + .map_err(|e| BackendError::Other(format!("Lock error: {}", e)))?; + + if *index >= pairs.len() { + return Ok(None); + } + + let pair = pairs[*index].clone(); + *index += 1; + Ok(Some(pair)) + } +} diff --git a/src/storage/state_backend/mod.rs b/src/storage/state_backend/mod.rs new file mode 100644 index 00000000..f12bb033 --- /dev/null +++ 
b/src/storage/state_backend/mod.rs @@ -0,0 +1,25 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub const STATE_DIR_NAME: &str = "state"; + +pub mod error; +pub mod factory; +pub mod key_builder; +pub mod memory; +pub mod rocksdb; +pub mod server; +pub mod store; + +pub use factory::StateStoreFactory; +pub use server::StateStorageServer; +pub use store::{StateIterator, StateStore}; diff --git a/src/storage/state_backend/rocksdb/factory.rs b/src/storage/state_backend/rocksdb/factory.rs new file mode 100644 index 00000000..79d4c73c --- /dev/null +++ b/src/storage/state_backend/rocksdb/factory.rs @@ -0,0 +1,172 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::storage::state_backend::error::BackendError; +use crate::storage::state_backend::factory::StateStoreFactory; +use rocksdb::{DB, Options}; +use std::path::Path; +use std::sync::{Arc, Mutex}; + +/// RocksDB configuration options +#[derive(Debug, Clone, Default)] +pub struct RocksDBConfig { + /// Maximum number of open files + pub max_open_files: Option, + /// Write buffer size (bytes) + pub write_buffer_size: Option, + /// Maximum number of write buffers + pub max_write_buffer_number: Option, + /// Target file size base (bytes) + pub target_file_size_base: Option, + /// Maximum bytes for level base (bytes) + pub max_bytes_for_level_base: Option, +} + +/// Convert from configuration struct to RocksDBConfig +impl From<&crate::config::storage::RocksDBStorageConfig> for RocksDBConfig { + fn from(config: &crate::config::storage::RocksDBStorageConfig) -> Self { + Self { + max_open_files: config.max_open_files, + write_buffer_size: config.write_buffer_size, + max_write_buffer_number: config.max_write_buffer_number, + target_file_size_base: config.target_file_size_base, + max_bytes_for_level_base: config.max_bytes_for_level_base, + } + } +} + +/// RocksDB state store factory +pub struct RocksDBStateStoreFactory { + /// RocksDB database instance + db: Arc, + /// Lock for protecting column family creation operations + cf_creation_lock: Mutex<()>, +} + +impl StateStoreFactory for RocksDBStateStoreFactory { + fn new_state_store( + &self, + column_family: Option, + ) -> Result, BackendError> { + self.new_state_store(column_family) + } +} + +impl RocksDBStateStoreFactory { + /// Create a new RocksDB state store factory + /// + /// # Arguments + /// - `db_path`: database path + /// - `config`: RocksDB configuration + /// + /// # Returns + /// - `Ok(RocksDBStateStoreFactory)`: successfully created + /// - `Err(BackendError)`: creation failed + pub fn new>(db_path: P, config: RocksDBConfig) -> Result { + let mut opts = Options::default(); + opts.create_if_missing(true); 
+ opts.create_missing_column_families(true); + + if let Some(max_open_files) = config.max_open_files { + opts.set_max_open_files(max_open_files); + } + if let Some(write_buffer_size) = config.write_buffer_size { + opts.set_write_buffer_size(write_buffer_size); + } + if let Some(max_write_buffer_number) = config.max_write_buffer_number { + opts.set_max_write_buffer_number(max_write_buffer_number); + } + if let Some(target_file_size_base) = config.target_file_size_base { + opts.set_target_file_size_base(target_file_size_base); + } + if let Some(max_bytes_for_level_base) = config.max_bytes_for_level_base { + opts.set_max_bytes_for_level_base(max_bytes_for_level_base); + } + + opts.set_merge_operator_associative("appendOp", merge_operator); + + let db_path = db_path.as_ref(); + if let Some(parent) = db_path.parent() { + std::fs::create_dir_all(parent) + .map_err(|e| BackendError::IoError(format!("Failed to create directory: {}", e)))?; + } + + let db = DB::open(&opts, db_path) + .map_err(|e| BackendError::IoError(format!("Failed to open RocksDB: {}", e)))?; + + Ok(Self { + db: Arc::new(db), + cf_creation_lock: Mutex::new(()), + }) + } + + /// Create a new state store instance + /// + /// # Arguments + /// - `column_family`: optional column family name, if None uses default column family + /// + /// # Returns + /// - `Ok(Box)`: successfully created + /// - `Err(BackendError)`: creation failed + /// + /// Note: If a column family name is specified and it doesn't exist, it will be created automatically + pub fn new_state_store( + &self, + column_family: Option, + ) -> Result, BackendError> { + if let Some(ref cf_name) = column_family + && cf_name != "default" + && self.db.cf_handle(cf_name).is_none() + { + let _guard = self.cf_creation_lock.lock().map_err(|e| { + BackendError::Other(format!("Failed to acquire cf creation lock: {}", e)) + })?; + + if self.db.cf_handle(cf_name).is_none() { + log::info!("Creating column family '{}' as it does not exist", cf_name); + let opts 
= Options::default(); + self.db.create_cf(cf_name, &opts).map_err(|e| { + BackendError::Other(format!( + "Failed to create column family '{}': {}", + cf_name, e + )) + })?; + } + } + + crate::storage::state_backend::rocksdb::store::RocksDBStateStore::new_with_factory( + self.db.clone(), + column_family, + ) + } +} + +/// Merge operator: for merging values (append operation) +fn merge_operator( + _new_key: &[u8], + existing_val: Option<&[u8]>, + operands: &rocksdb::MergeOperands, +) -> Option> { + use std::io::Write; + + let mut buf = Vec::new(); + + for operand in operands { + buf.write_all(operand).ok()?; + } + + if let Some(existing) = existing_val { + buf.write_all(existing).ok()?; + } + + Some(buf) +} diff --git a/src/storage/state_backend/rocksdb/mod.rs b/src/storage/state_backend/rocksdb/mod.rs new file mode 100644 index 00000000..b92df773 --- /dev/null +++ b/src/storage/state_backend/rocksdb/mod.rs @@ -0,0 +1,16 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod factory; +pub mod store; + +pub use factory::{RocksDBConfig, RocksDBStateStoreFactory}; diff --git a/src/storage/state_backend/rocksdb/store.rs b/src/storage/state_backend/rocksdb/store.rs new file mode 100644 index 00000000..b7edfa64 --- /dev/null +++ b/src/storage/state_backend/rocksdb/store.rs @@ -0,0 +1,251 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::storage::state_backend::error::BackendError; +use crate::storage::state_backend::key_builder::{build_key, increment_key, is_all_0xff}; +use crate::storage::state_backend::store::{StateIterator, StateStore}; +use rocksdb::{ + BlockBasedOptions, Cache, ColumnFamilyDescriptor, DB, DBCompressionType, Direction, + IteratorMode, Options, ReadOptions, WriteBatch, +}; +use std::path::Path; +use std::sync::Arc; + +pub struct RocksDBStateStore { + db: Arc, + cf_name: String, +} + +impl RocksDBStateStore { + /// Create a new state store instance from factory + /// + /// # Arguments + /// - `db`: database instance + /// - `column_family`: optional column family name + /// + /// # Returns + /// - `Ok(Box)`: successfully created + /// - `Err(BackendError)`: creation failed + pub fn new_with_factory( + db: Arc, + column_family: Option, + ) -> Result, BackendError> { + let cf_name = column_family.unwrap_or_else(|| "default".to_string()); + if db.cf_handle(&cf_name).is_none() { + return Err(BackendError::Other(format!( + "Column family '{}' does not exist", + cf_name + ))); + } + Ok(Box::new(Self { db, cf_name })) + } + + pub fn open>(path: P, cf_name: Option) -> Result { + let mut opts = Options::default(); + opts.create_if_missing(true); + opts.create_missing_column_families(true); + opts.set_merge_operator_associative("appendOp", merge_operator); + opts.set_compression_type(DBCompressionType::Lz4); + + let mut block_opts = BlockBasedOptions::default(); + block_opts.set_block_size(16 * 1024); + block_opts.set_cache_index_and_filter_blocks(true); + 
block_opts.set_block_cache(&Cache::new_lru_cache(128 * 1024 * 1024)); + opts.set_block_based_table_factory(&block_opts); + + let target_cf = cf_name.unwrap_or_else(|| "default".to_string()); + + let cf_descriptors = vec![ + ColumnFamilyDescriptor::new("default", Options::default()), + ColumnFamilyDescriptor::new(&target_cf, opts.clone()), + ]; + + let db = DB::open_cf_descriptors(&opts, path, cf_descriptors) + .map_err(|e| BackendError::IoError(e.to_string()))?; + + Ok(Self { + db: Arc::new(db), + cf_name: target_cf, + }) + } + + #[inline(always)] + fn cf_handle(&self) -> Result>, BackendError> { + self.db + .cf_handle(&self.cf_name) + .ok_or_else(|| BackendError::Other(format!("Handle for CF '{}' invalid", self.cf_name))) + } +} + +impl StateStore for RocksDBStateStore { + fn put_state(&self, key: Vec, value: Vec) -> Result<(), BackendError> { + let cf = self.cf_handle()?; + self.db + .put_cf(&cf, key, value) + .map_err(|e| BackendError::IoError(e.to_string())) + } + + fn get_state(&self, key: Vec) -> Result>, BackendError> { + let cf = self.cf_handle()?; + self.db + .get_cf(&cf, key) + .map_err(|e| BackendError::IoError(e.to_string())) + } + + fn delete_state(&self, key: Vec) -> Result<(), BackendError> { + let cf = self.cf_handle()?; + self.db + .delete_cf(&cf, key) + .map_err(|e| BackendError::IoError(e.to_string())) + } + + fn list_states(&self, start: Vec, end: Vec) -> Result>, BackendError> { + let cf = self.cf_handle()?; + let mut ropts = ReadOptions::default(); + ropts.set_iterate_upper_bound(end.clone()); + + let iter = + self.db + .iterator_cf_opt(&cf, ropts, IteratorMode::From(&start, Direction::Forward)); + let mut results = Vec::with_capacity(1024); + + for item in iter { + let (k, _) = item.map_err(|e| BackendError::IoError(e.to_string()))?; + if k.as_ref() >= end.as_slice() { + break; + } + results.push(k.to_vec()); + } + Ok(results) + } + + fn merge( + &self, + key_group: Vec, + key: Vec, + namespace: Vec, + user_key: Vec, + value: Vec, + ) -> 
Result<(), BackendError> { + let cf = self.cf_handle()?; + let full_key = build_key(&key_group, &key, &namespace, &user_key); + self.db + .merge_cf(&cf, full_key, value) + .map_err(|e| BackendError::IoError(e.to_string())) + } + + fn delete_prefix_bytes(&self, prefix: Vec) -> Result { + if prefix.is_empty() { + return Err(BackendError::Other("Empty prefix".into())); + } + let cf = self.cf_handle()?; + + if !is_all_0xff(&prefix) { + let end_key = increment_key(&prefix); + self.db + .delete_range_cf(&cf, &prefix, &end_key) + .map_err(|e| BackendError::IoError(e.to_string()))?; + return Ok(0); + } + + let mut batch = WriteBatch::default(); + let mut count = 0; + let iter = self.db.prefix_iterator_cf(&cf, &prefix); + + for item in iter { + let (k, _) = item.map_err(|e| BackendError::IoError(e.to_string()))?; + if !k.starts_with(&prefix) { + break; + } + batch.delete_cf(&cf, k); + count += 1; + if count % 1000 == 0 { + self.db + .write(batch) + .map_err(|e| BackendError::IoError(e.to_string()))?; + batch = WriteBatch::default(); + } + } + self.db + .write(batch) + .map_err(|e| BackendError::IoError(e.to_string()))?; + Ok(count) + } + + fn scan(&self, prefix: Vec) -> Result, BackendError> { + Ok(Box::new(RocksDBStateIterator::new( + self.db.clone(), + self.cf_name.clone(), + prefix, + )?)) + } +} + +pub struct RocksDBStateIterator { + _db: Arc, + buffer: std::vec::IntoIter<(Vec, Vec)>, +} + +impl RocksDBStateIterator { + fn new(db: Arc, cf_name: String, prefix: Vec) -> Result { + let mut collected = Vec::with_capacity(512); + { + let cf = db + .cf_handle(&cf_name) + .ok_or_else(|| BackendError::Other("CF missing".into()))?; + let mut ropts = ReadOptions::default(); + ropts.set_prefix_same_as_start(true); + + let iter = + db.iterator_cf_opt(&cf, ropts, IteratorMode::From(&prefix, Direction::Forward)); + + for item in iter { + let (k, v) = item.map_err(|e| BackendError::IoError(e.to_string()))?; + if !k.starts_with(&prefix) { + break; + } + collected.push((k.to_vec(), 
v.to_vec())); + } + } + + Ok(Self { + _db: db, + buffer: collected.into_iter(), + }) + } +} + +impl StateIterator for RocksDBStateIterator { + fn has_next(&mut self) -> Result { + Ok(self.buffer.as_slice().len() > 0) + } + + fn next(&mut self) -> Result, Vec)>, BackendError> { + Ok(self.buffer.next()) + } +} + +fn merge_operator( + _: &[u8], + existing: Option<&[u8]>, + operands: &rocksdb::MergeOperands, +) -> Option> { + let size = existing.map_or(0, |v| v.len()) + operands.iter().map(|o| o.len()).sum::(); + let mut buf = Vec::with_capacity(size); + if let Some(v) = existing { + buf.extend_from_slice(v); + } + for op in operands { + buf.extend_from_slice(op); + } + Some(buf) +} diff --git a/src/storage/state_backend/server.rs b/src/storage/state_backend/server.rs new file mode 100644 index 00000000..efeceaa4 --- /dev/null +++ b/src/storage/state_backend/server.rs @@ -0,0 +1,92 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::config::storage::{StateStorageConfig, StateStorageType}; +use crate::config::{get_state_dir, get_state_dir_for_base}; +use crate::storage::state_backend::error::BackendError; +use crate::storage::state_backend::factory::{ + FactoryType, StateStoreFactory, get_factory_for_task, +}; +use crate::storage::state_backend::rocksdb::RocksDBConfig; +use std::fs; +use std::path::PathBuf; +use std::sync::Arc; + +pub struct StateStorageServer { + config: Arc, + resolved_state_dir: Option, + factory_type: FactoryType, +} + +impl StateStorageServer { + pub fn new(config: StateStorageConfig) -> Result { + let factory_type = match config.storage_type { + StateStorageType::Memory => FactoryType::Memory, + StateStorageType::RocksDB => FactoryType::RocksDB, + }; + + let resolved_state_dir = Self::resolve_and_prepare_dir(&config, &factory_type)?; + + Ok(Self { + config: Arc::new(config), + resolved_state_dir, + factory_type, + }) + } + + fn resolve_and_prepare_dir( + config: &StateStorageConfig, + factory_type: &FactoryType, + ) -> Result, BackendError> { + if !matches!(factory_type, FactoryType::RocksDB) { + return Ok(None); + } + + let base_dir = config.base_dir.as_deref().unwrap_or("data"); + + let state_dir = if base_dir == "data" { + get_state_dir() + } else { + get_state_dir_for_base(base_dir) + }; + + fs::create_dir_all(&state_dir).map_err(|e| { + BackendError::IoError(format!( + "Failed to create state storage directory at {:?}: {}", + state_dir, e + )) + })?; + + let final_path = state_dir.canonicalize().unwrap_or(state_dir); + Ok(Some(final_path)) + } + + pub fn create_factory( + &self, + task_name: String, + created_at: u64, + ) -> Result, BackendError> { + let rocksdb_config = if self.factory_type == FactoryType::RocksDB { + Some(RocksDBConfig::from(&self.config.rocksdb)) + } else { + None + }; + + get_factory_for_task( + self.factory_type, + task_name, + created_at, + self.resolved_state_dir.as_deref(), + rocksdb_config, + ) + } +} diff --git 
a/src/storage/state_backend/store.rs b/src/storage/state_backend/store.rs new file mode 100644 index 00000000..0065aa59 --- /dev/null +++ b/src/storage/state_backend/store.rs @@ -0,0 +1,288 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::storage::state_backend::error::BackendError; + +/// State store iterator +pub trait StateIterator: Send + Sync { + /// Check if there is a next element + /// + /// # Returns + /// - `Ok(true)`: has next element + /// - `Ok(false)`: no more data + /// - `Err(BackendError)`: check failed + fn has_next(&mut self) -> Result; + + /// Get the next key-value pair + /// + /// # Returns + /// - `Ok(Some((key, value)))`: next key-value pair + /// - `Ok(None)`: no more data + /// - `Err(BackendError)`: iteration failed + fn next(&mut self) -> Result, Vec)>, BackendError>; +} + +/// State store interface +/// +/// Provides complete state storage functionality, including: +/// - Simple KV operations (using byte arrays directly as keys) +/// - Complex KV operations (using complex keys) +/// - Iterator support +pub trait StateStore: Send + Sync { + /// Store a key-value pair (simple key) + /// + /// # Arguments + /// - `key`: key (byte array) + /// - `value`: value (byte array) + /// + /// # Returns + /// - `Ok(())`: store succeeded + /// - `Err(BackendError)`: store failed + fn put_state(&self, key: Vec, value: Vec) -> Result<(), BackendError>; + + /// Get value (simple key) + /// + /// # Arguments + /// - `key`: key (byte 
array) + /// + /// # Returns + /// - `Ok(Some(value))`: value found + /// - `Ok(None)`: key does not exist + /// - `Err(BackendError)`: get failed + fn get_state(&self, key: Vec) -> Result>, BackendError>; + + /// Delete a key-value pair (simple key) + /// + /// # Arguments + /// - `key`: key (byte array) + /// + /// # Returns + /// - `Ok(())`: delete succeeded + /// - `Err(BackendError)`: delete failed + fn delete_state(&self, key: Vec) -> Result<(), BackendError>; + + /// List all keys in the specified range + /// + /// # Arguments + /// - `start_inclusive`: start key (inclusive) + /// - `end_exclusive`: end key (exclusive) + /// + /// # Returns + /// - `Ok(keys)`: list of keys + /// - `Err(BackendError)`: list failed + fn list_states( + &self, + start_inclusive: Vec, + end_exclusive: Vec, + ) -> Result>, BackendError>; + + /// Store a key-value pair (complex key) + /// + /// # Arguments + /// - `key_group`: key group (byte array) + /// - `key`: key (byte array) + /// - `namespace`: namespace (byte array) + /// - `user_key`: user key (byte array) + /// - `value`: value (byte array) + /// + /// # Returns + /// - `Ok(())`: store succeeded + /// - `Err(BackendError)`: store failed + fn put( + &self, + key_group: Vec, + key: Vec, + namespace: Vec, + user_key: Vec, + value: Vec, + ) -> Result<(), BackendError> { + let key_bytes = crate::storage::state_backend::key_builder::build_key( + &key_group, &key, &namespace, &user_key, + ); + self.put_state(key_bytes, value) + } + + /// Get value (complex key) + /// + /// # Arguments + /// - `key_group`: key group (byte array) + /// - `key`: key (byte array) + /// - `namespace`: namespace (byte array) + /// - `user_key`: user key (byte array) + /// + /// # Returns + /// - `Ok(Some(value))`: value found + /// - `Ok(None)`: key does not exist + /// - `Err(BackendError)`: get failed + fn get( + &self, + key_group: Vec, + key: Vec, + namespace: Vec, + user_key: Vec, + ) -> Result>, BackendError> { + let key_bytes = 
crate::storage::state_backend::key_builder::build_key( + &key_group, &key, &namespace, &user_key, + ); + self.get_state(key_bytes) + } + + /// Delete a key-value pair (complex key) + /// + /// # Arguments + /// - `key_group`: key group (byte array) + /// - `key`: key (byte array) + /// - `namespace`: namespace (byte array) + /// - `user_key`: user key (byte array) + /// + /// # Returns + /// - `Ok(())`: delete succeeded + /// - `Err(BackendError)`: delete failed + fn delete( + &self, + key_group: Vec, + key: Vec, + namespace: Vec, + user_key: Vec, + ) -> Result<(), BackendError> { + let key_bytes = crate::storage::state_backend::key_builder::build_key( + &key_group, &key, &namespace, &user_key, + ); + self.delete_state(key_bytes) + } + + /// Merge value (complex key, using merge operation) + /// + /// # Arguments + /// - `key_group`: key group (byte array) + /// - `key`: key (byte array) + /// - `namespace`: namespace (byte array) + /// - `user_key`: user key (byte array) + /// - `value`: value to merge (byte array) + /// + /// # Returns + /// - `Ok(())`: merge succeeded + /// - `Err(BackendError)`: merge failed + fn merge( + &self, + key_group: Vec, + key: Vec, + namespace: Vec, + user_key: Vec, + value: Vec, + ) -> Result<(), BackendError>; + + /// Delete all keys with the specified prefix (complex key) + /// + /// # Arguments + /// - `key_group`: key group (byte array) + /// - `key`: key (byte array) + /// - `namespace`: namespace (byte array) + /// + /// # Returns + /// - `Ok(count)`: number of keys deleted + /// - `Err(BackendError)`: delete failed + fn delete_prefix( + &self, + key_group: Vec, + key: Vec, + namespace: Vec, + ) -> Result { + let prefix_bytes = crate::storage::state_backend::key_builder::build_key( + &key_group, + &key, + &namespace, + &[], + ); + self.delete_prefix_bytes(prefix_bytes) + } + + /// List all keys in the specified range (complex key) + /// + /// # Arguments + /// - `key_group`: key group (byte array) + /// - `key`: key (byte 
array) + /// - `namespace`: namespace (byte array) + /// - `start_inclusive`: start user_key (inclusive) + /// - `end_exclusive`: end user_key (exclusive) + /// + /// # Returns + /// - `Ok(keys)`: list of keys (returns complete complex key byte arrays) + /// - `Err(BackendError)`: list failed + fn list_complex( + &self, + key_group: Vec, + key: Vec, + namespace: Vec, + start_inclusive: Vec, + end_exclusive: Vec, + ) -> Result>, BackendError> { + let start_key = crate::storage::state_backend::key_builder::build_key( + &key_group, + &key, + &namespace, + &start_inclusive, + ); + let end_key = crate::storage::state_backend::key_builder::build_key( + &key_group, + &key, + &namespace, + &end_exclusive, + ); + self.list_states(start_key, end_key) + } + + /// Delete all keys with the specified prefix (byte array) + /// + /// # Arguments + /// - `prefix`: key prefix (byte array) + /// + /// # Returns + /// - `Ok(count)`: number of keys deleted + /// - `Err(BackendError)`: delete failed + fn delete_prefix_bytes(&self, prefix: Vec) -> Result; + + /// Scan all key-value pairs with the specified prefix (simple key) + /// + /// # Arguments + /// - `prefix`: key prefix (byte array) + /// + /// # Returns + /// - `Ok(Box)`: iterator + /// - `Err(BackendError)`: failed to create iterator + fn scan(&self, prefix: Vec) -> Result, BackendError>; + + /// Scan all key-value pairs with the specified prefix (complex key) + /// + /// # Arguments + /// - `key_group`: key group (byte array) + /// - `key`: key (byte array) + /// - `namespace`: namespace (byte array) + /// + /// # Returns + /// - `Ok(Box)`: iterator + /// - `Err(BackendError)`: failed to create iterator + fn scan_complex( + &self, + key_group: Vec, + key: Vec, + namespace: Vec, + ) -> Result, BackendError> { + let prefix = crate::storage::state_backend::key_builder::build_key( + &key_group, + &key, + &namespace, + &[], + ); + self.scan(prefix) + } +} diff --git a/src/storage/task/factory.rs b/src/storage/task/factory.rs new 
file mode 100644 index 00000000..b25fa10a --- /dev/null +++ b/src/storage/task/factory.rs @@ -0,0 +1,41 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::rocksdb_storage::RocksDBTaskStorage; +use super::storage::TaskStorage; +use crate::config::storage::{TaskStorageConfig, TaskStorageType}; +use crate::config::{get_task_dir, resolve_path}; +use anyhow::{Context, Result}; + +pub struct TaskStorageFactory; + +impl TaskStorageFactory { + pub fn create_storage(config: &TaskStorageConfig) -> Result> { + match config.storage_type { + TaskStorageType::RocksDB => { + let db_path = if let Some(ref path) = config.db_path { + resolve_path(path) + } else { + get_task_dir() + }; + + if let Some(parent) = db_path.parent() { + std::fs::create_dir_all(parent) + .context(format!("Failed to create directory: {:?}", parent))?; + } + + let storage = RocksDBTaskStorage::new(db_path, Some(&config.rocksdb))?; + Ok(Box::new(storage)) + } + } + } +} diff --git a/src/storage/task/function_info.rs b/src/storage/task/function_info.rs new file mode 100644 index 00000000..5148e565 --- /dev/null +++ b/src/storage/task/function_info.rs @@ -0,0 +1,18 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
/// Lightweight, cloneable snapshot of a registered function.
#[derive(Clone, Debug)]
pub struct FunctionInfo {
    // Function name.
    pub name: String,
    // Task type identifier.
    pub task_type: String,
    // Current status, as a string.
    pub status: String,
}
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! RocksDB Task Storage - RocksDB-based task storage implementation +//! +//! Uses three column families: task_meta, task_config, task_payload. + +use super::storage::{StoredTaskInfo, TaskModuleBytes, TaskStorage}; +use crate::config::storage::RocksDBStorageConfig; +use crate::runtime::common::ComponentState; +use anyhow::{Context, Result, anyhow}; +use rocksdb::{ColumnFamilyDescriptor, DB, IteratorMode, Options, WriteBatch}; +use serde::{Deserialize, Serialize}; +use std::path::Path; +use std::sync::Arc; + +const CF_METADATA: &str = "task_meta"; +const CF_CONFIG: &str = "task_config"; +const CF_PAYLOAD: &str = "task_payload"; + +#[derive(Debug, Clone, Serialize, Deserialize)] +struct TaskMetadata { + task_type: String, + state: ComponentState, + created_at: u64, + checkpoint_id: Option, +} + +pub struct RocksDBTaskStorage { + db: Arc, +} + +impl RocksDBTaskStorage { + pub fn new>(db_path: P, config: Option<&RocksDBStorageConfig>) -> Result { + let path = db_path.as_ref(); + + let mut db_opts = Options::default(); + db_opts.create_if_missing(true); + db_opts.create_missing_column_families(true); + + if let Some(cfg) = config { + Self::apply_tuning_parameters(&mut db_opts, cfg); + } + + let cf_descriptors = vec![ + ColumnFamilyDescriptor::new(CF_METADATA, Options::default()), + ColumnFamilyDescriptor::new(CF_CONFIG, Options::default()), + ColumnFamilyDescriptor::new(CF_PAYLOAD, Options::default()), + ]; + + let db = DB::open_cf_descriptors(&db_opts, path, cf_descriptors) + .with_context(|| format!("Failed to open RocksDB at {:?}", path))?; + + 
Ok(Self { db: Arc::new(db) }) + } + + fn apply_tuning_parameters(opts: &mut Options, cfg: &RocksDBStorageConfig) { + if let Some(v) = cfg.max_open_files { + opts.set_max_open_files(v); + } + if let Some(v) = cfg.write_buffer_size { + opts.set_write_buffer_size(v); + } + if let Some(v) = cfg.max_write_buffer_number { + opts.set_max_write_buffer_number(v); + } + opts.set_compaction_style(rocksdb::DBCompactionStyle::Level); + opts.set_compression_type(rocksdb::DBCompressionType::Lz4); + } + + fn get_cf(&self, name: &str) -> Result>> { + self.db + .cf_handle(name) + .ok_or_else(|| anyhow!("Storage integrity error: CF '{}' missing", name)) + } +} + +impl TaskStorage for RocksDBTaskStorage { + fn create_task(&self, task_info: &StoredTaskInfo) -> Result<()> { + let key = task_info.name.as_bytes(); + let cf_meta = self.get_cf(CF_METADATA)?; + let cf_conf = self.get_cf(CF_CONFIG)?; + let cf_payl = self.get_cf(CF_PAYLOAD)?; + + if self.db.get_cf(&cf_meta, key)?.is_some() { + return Err(anyhow!("Task uniqueness violation: {}", task_info.name)); + } + + let meta = TaskMetadata { + task_type: task_info.task_type.clone(), + state: task_info.state.clone(), + created_at: task_info.created_at, + checkpoint_id: task_info.checkpoint_id, + }; + + let mut batch = WriteBatch::default(); + batch.put_cf(&cf_meta, key, bincode::serialize(&meta)?); + batch.put_cf(&cf_conf, key, &task_info.config_bytes); + + if let Some(ref module) = task_info.module_bytes { + batch.put_cf(&cf_payl, key, bincode::serialize(module)?); + } + + self.db + .write(batch) + .context("Atomic transaction failed during task creation") + } + + fn update_task_state(&self, task_name: &str, new_state: ComponentState) -> Result<()> { + let cf = self.get_cf(CF_METADATA)?; + let key = task_name.as_bytes(); + + let raw = self + .db + .get_cf(&cf, key)? 
+ .ok_or_else(|| anyhow!("Task {} not found", task_name))?; + + let mut meta: TaskMetadata = bincode::deserialize(&raw)?; + meta.state = new_state; + + self.db.put_cf(&cf, key, bincode::serialize(&meta)?)?; + Ok(()) + } + + fn update_task_checkpoint_id(&self, task_name: &str, checkpoint_id: Option) -> Result<()> { + let cf = self.get_cf(CF_METADATA)?; + let key = task_name.as_bytes(); + + let raw = self + .db + .get_cf(&cf, key)? + .ok_or_else(|| anyhow!("Task {} not found", task_name))?; + + let mut meta: TaskMetadata = bincode::deserialize(&raw)?; + meta.checkpoint_id = checkpoint_id; + + self.db.put_cf(&cf, key, bincode::serialize(&meta)?)?; + Ok(()) + } + + fn delete_task(&self, task_name: &str) -> Result<()> { + let key = task_name.as_bytes(); + let mut batch = WriteBatch::default(); + + batch.delete_cf(&self.get_cf(CF_METADATA)?, key); + batch.delete_cf(&self.get_cf(CF_CONFIG)?, key); + batch.delete_cf(&self.get_cf(CF_PAYLOAD)?, key); + + self.db.write(batch).context("Atomic deletion failed") + } + + fn load_task(&self, task_name: &str) -> Result { + let key = task_name.as_bytes(); + + let meta_raw = self + .db + .get_cf(&self.get_cf(CF_METADATA)?, key)? + .ok_or_else(|| anyhow!("Metadata missing: {}", task_name))?; + + let config_bytes = self + .db + .get_cf(&self.get_cf(CF_CONFIG)?, key)? + .ok_or_else(|| anyhow!("Config missing: {}", task_name))?; + + let module_bytes = self + .db + .get_cf(&self.get_cf(CF_PAYLOAD)?, key)? 
+ .and_then(|b| bincode::deserialize::(&b).ok()); + + let meta: TaskMetadata = bincode::deserialize(&meta_raw)?; + + Ok(StoredTaskInfo { + name: task_name.to_string(), + task_type: meta.task_type, + module_bytes, + config_bytes: config_bytes.to_vec(), + state: meta.state, + created_at: meta.created_at, + checkpoint_id: meta.checkpoint_id, + }) + } + + fn list_all_tasks(&self) -> Result> { + let cf_meta = self.get_cf(CF_METADATA)?; + let iter = self.db.iterator_cf(&cf_meta, IteratorMode::Start); + let mut tasks = Vec::new(); + + for item in iter { + let (key, _) = item?; + let name = std::str::from_utf8(&key)?; + if let Ok(task) = self.load_task(name) { + tasks.push(task); + } + } + Ok(tasks) + } + + fn task_exists(&self, task_name: &str) -> Result { + let cf = self.get_cf(CF_METADATA)?; + Ok(self.db.get_cf(&cf, task_name.as_bytes())?.is_some()) + } +} diff --git a/src/storage/task/storage.rs b/src/storage/task/storage.rs new file mode 100644 index 00000000..3c9e4080 --- /dev/null +++ b/src/storage/task/storage.rs @@ -0,0 +1,56 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::runtime::common::ComponentState; +use anyhow::Result; +use serde::{Deserialize, Serialize}; + +#[allow(dead_code)] +#[derive(Debug, Clone, Serialize, Deserialize)] +pub enum TaskModuleBytes { + Wasm(Vec), + Python { + class_name: String, + module: String, + #[serde(skip_serializing_if = "Option::is_none")] + bytes: Option>, + }, +} + +#[allow(dead_code)] +#[derive(Debug, Clone)] +pub struct StoredTaskInfo { + pub name: String, + pub task_type: String, + pub module_bytes: Option, + pub config_bytes: Vec, + pub state: ComponentState, + pub created_at: u64, + pub checkpoint_id: Option, +} + +#[allow(dead_code)] +pub trait TaskStorage: Send + Sync { + fn create_task(&self, task_info: &StoredTaskInfo) -> Result<()>; + + fn update_task_state(&self, task_name: &str, new_state: ComponentState) -> Result<()>; + + fn update_task_checkpoint_id(&self, task_name: &str, checkpoint_id: Option) -> Result<()>; + + fn delete_task(&self, task_name: &str) -> Result<()>; + + fn load_task(&self, task_name: &str) -> Result; + + fn task_exists(&self, task_name: &str) -> Result; + + fn list_all_tasks(&self) -> Result>; +} diff --git a/tests/common.go b/tests/common.go deleted file mode 100644 index d3bfddad..00000000 --- a/tests/common.go +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package tests - -type Person struct { - Name string `json:"name"` - Money int `json:"money"` -} diff --git a/tests/docker-compose.yaml b/tests/docker-compose.yaml deleted file mode 100644 index 371fa003..00000000 --- a/tests/docker-compose.yaml +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright 2024 Function Stream Org. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -version: '3' -services: - pulsar: - image: apachepulsar/pulsar:3.1.2 - command: bin/pulsar standalone - ports: - - "6650:6650" - - "8080:8080" - nats: - image: nats:latest - container_name: nats-server - ports: - - "4222:4222" - - "8222:8222" - environment: - - NATS_ALLOW_NEW_USERS=true diff --git a/tests/integration_test.go b/tests/integration_test.go deleted file mode 100644 index f594809f..00000000 --- a/tests/integration_test.go +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Copyright 2024 Function Stream Org. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package tests - -import ( - "context" - "encoding/json" - "io" - "math/rand" - "strconv" - "testing" - - "github.com/apache/pulsar-client-go/pulsar" - adminclient "github.com/functionstream/function-stream/admin/client" - "github.com/functionstream/function-stream/admin/utils" - "github.com/functionstream/function-stream/common" - "github.com/functionstream/function-stream/server" -) - -func startServer() { - common.RunProcess(func() (io.Closer, error) { - s, err := server.NewDefaultServer() - if err != nil { - return nil, err - } - go s.Run(context.Background()) - return s, nil - }) -} - -func init() { - go startServer() -} - -func TestBasicFunction(t *testing.T) { - - cfg := adminclient.NewConfiguration() - cli := adminclient.NewAPIClient(cfg) - - client, err := pulsar.NewClient(pulsar.ClientOptions{ - URL: "pulsar://localhost:6650", - }) - if err != nil { - t.Fatalf(err.Error()) - } - - name := "func-" + strconv.Itoa(rand.Int()) - inputTopic := "test-input-" + strconv.Itoa(rand.Int()) - outputTopic := "test-output-" + strconv.Itoa(rand.Int()) - f := adminclient.ModelFunction{ - Name: name, - Runtime: adminclient.ModelRuntimeConfig{ - Type: common.WASMRuntime, - Config: map[string]interface{}{ - common.RuntimeArchiveConfigKey: "../bin/example_basic.wasm", - }, - }, - Source: utils.MakePulsarSourceTubeConfig(inputTopic), - Sink: *utils.MakePulsarSinkTubeConfig(outputTopic), - Replicas: 1, - } - - producer, err := client.CreateProducer(pulsar.ProducerOptions{ - Topic: inputTopic, - }) - if err != nil { - t.Fatalf(err.Error()) - } - - consumer, err := client.Subscribe(pulsar.ConsumerOptions{ - Topic: outputTopic, - SubscriptionName: "test-sub", - }) - if err != nil { - t.Fatalf(err.Error()) - } - - res, err := cli.FunctionAPI.CreateFunction(context.Background()).Body(f).Execute() - if err != nil && res == nil { - t.Errorf("failed to create function: %v", err) - } - if res.StatusCode != 200 { - body, _ := io.ReadAll(res.Body) - t.Fatalf("expected 200, got %d: 
%s", res.StatusCode, body) - return - } - - for i := 0; i < 10; i++ { - p := Person{Name: "rbt", Money: 0} - jsonBytes, err := json.Marshal(p) - if err != nil { - t.Fatalf(err.Error()) - } - _, err = producer.Send(context.Background(), &pulsar.ProducerMessage{ - Payload: jsonBytes, - }) - if err != nil { - return - } - - msg, err := consumer.Receive(context.Background()) - if err != nil { - t.Fatalf(err.Error()) - } - payload := msg.Payload() - var out Person - err = json.Unmarshal(payload, &out) - if err != nil { - t.Fatalf(err.Error()) - } - if out.Money != 1 { - t.Fatalf("expected 1, got %d", out.Money) - } - } - - res, err = cli.FunctionAPI.DeleteFunction(context.Background(), name).Execute() - if err != nil { - t.Fatalf(err.Error()) - } - if res.StatusCode != 200 { - t.Fatalf("expected 200, got %d", res.StatusCode) - } -} diff --git a/tests/test_config.json b/tests/test_config.json deleted file mode 100644 index 3a84cc56..00000000 --- a/tests/test_config.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "listen-addr": ":17300", - "tube-config": { - "my-tube": { - "key": "value" - } - }, - "runtime-config": { - "custom-runtime": { - "name": "test" - } - } -} \ No newline at end of file diff --git a/wit/processor.wit b/wit/processor.wit new file mode 100644 index 00000000..1bceed72 --- /dev/null +++ b/wit/processor.wit @@ -0,0 +1,62 @@ +package functionstream:core@0.1.0; + +interface kv { + variant error { + not-found, + io-error(string), + other(string), + } + + record complex-key { + key-group: list, + key: list, + namespace: list, + user-key: list, + } + + resource iterator { + has-next: func() -> result; + next: func() -> result, list>>, error>; + } + + resource store { + constructor(name: string); + + // --- Simple KV (Bytes) --- + put-state: func(key: list, value: list) -> result<_, error>; + get-state: func(key: list) -> result>, error>; + delete-state: func(key: list) -> result<_, error>; + list-states: func(start-inclusive: list, end-exclusive: list) -> result>, 
error>; + + // --- Complex KV --- + put: func(key: complex-key, value: list) -> result<_, error>; + get: func(key: complex-key) -> result>, error>; + delete: func(key: complex-key) -> result<_, error>; + merge: func(key: complex-key, value: list) -> result<_, error>; + delete-prefix: func(key: complex-key) -> result<_, error>; + list-complex: func(key-group: list, key: list, namespace: list, start-inclusive: list, end-exclusive: list) -> result>, error>; + + // --- Iterator --- + scan-complex: func(key-group: list, key: list, namespace: list) -> result; + + } +} + +interface collector { + emit: func(target-id: u32, data: list); + emit-watermark: func(target-id: u32, watermark: u64); +} + +world processor { + import collector; + import kv; + + export fs-init: func(config: list>); + export fs-process: func(source-id: u32, data: list); + export fs-process-watermark: func(source-id: u32, watermark: u64); + export fs-take-checkpoint: func(checkpoint-id: u64); + export fs-check-heartbeat: func() -> bool; + export fs-close: func(); + export fs-exec: func(class-name: string, modules: list>>); + export fs-custom: func(payload: list) -> list; +}