diff --git a/.github/workflows/check_on_push.yaml b/.github/workflows/check_on_push.yaml index c26ef364..8edd4417 100644 --- a/.github/workflows/check_on_push.yaml +++ b/.github/workflows/check_on_push.yaml @@ -9,14 +9,14 @@ jobs: if: | github.event_name == 'push' || github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name != github.repository - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 steps: - uses: actions/checkout@master - name: Setup Tarantool CE - uses: tarantool/setup-tarantool@v2 + uses: tarantool/setup-tarantool@v3 with: - tarantool-version: '2.10' + tarantool-version: '2.11' - name: Setup tt run: | diff --git a/.github/workflows/push_rockspec.yaml b/.github/workflows/push_rockspec.yaml index 668ff0ca..f8af9b55 100644 --- a/.github/workflows/push_rockspec.yaml +++ b/.github/workflows/push_rockspec.yaml @@ -14,7 +14,7 @@ jobs: version-check: # We need this job to run only on push with tag. if: ${{ github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') }} - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 steps: - name: Check module version uses: tarantool/actions/check-module-version@master @@ -22,7 +22,7 @@ jobs: module-name: 'crud' push-scm-rockspec: - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 if: github.ref == 'refs/heads/master' steps: - uses: actions/checkout@master @@ -33,7 +33,7 @@ jobs: files: ${{ env.ROCK_NAME }}-scm-1.rockspec push-tagged-rockspec: - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 if: startsWith(github.ref, 'refs/tags') needs: version-check steps: diff --git a/.github/workflows/reusable_test.yml b/.github/workflows/reusable_test.yml index 03bb11c1..7b07f0c3 100644 --- a/.github/workflows/reusable_test.yml +++ b/.github/workflows/reusable_test.yml @@ -11,7 +11,7 @@ on: jobs: run_tests: - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 steps: - name: Clone the crud module @@ -39,8 +39,6 @@ jobs: - name: Install requirements run: ./deps.sh - env: - VSHARD_VERSION: "0.1.26" # This server starts and listen on 8084 port that is used for tests - name: Stop Mono server diff --git a/.github/workflows/test_on_push.yaml b/.github/workflows/test_on_push.yaml index 0d8f7ca5..e6c0fe35 100644 --- a/.github/workflows/test_on_push.yaml +++ b/.github/workflows/test_on_push.yaml @@ -10,51 +10,27 @@ jobs: github.event_name == 'push' || github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name != github.repository strategy: + fail-fast: false matrix: - # We need 1.10.6 here to check that module works with - # old Tarantool versions that don't have "tuple-keydef"/"tuple-merger" support. - # We test old metrics with Tarantool 2.10 because since Tarantool 2.11.1 - # it uses its own metrics package. - # We test old metrics with Cartridge 2.7.9 because since 2.8.0 it - # requires metrics 1.0.0. 
- tarantool-version: ["1.10.6", "1.10", "2.8", "2.10", "2.11"] - metrics-version: [""] - cartridge-version: ["2.8.0"] - external-tuple-merger-version: [""] - external-tuple-keydef-version: [""] include: - - tarantool-version: "1.10" - metrics-version: "1.0.0" - cartridge-version: "2.8.0" - - tarantool-version: "2.7" - cartridge-version: "2.8.0" - tarantool-version: "2.10" - metrics-version: "0.10.0" - cartridge-version: "2.7.9" - tarantool-version: "2.11" coveralls: true - metrics-version: "1.0.0" - cartridge-version: "2.8.0" - - tarantool-version: "2.11" - metrics-version: "1.0.0" - vshard-version: "0.1.25" - tarantool-version: "2.11" - external-merger-version: "0.0.5" - external-keydef-version: "0.0.4" - - tarantool-version: "3.0.0" - vshard-version: "0.1.25" + external-tuple-merger-version: "0.0.5" + external-tuple-keydef-version: "0.0.4" + - tarantool-version: "3.0" + - tarantool-version: "3.1" + - tarantool-version: "3.2" + - tarantool-version: "3.3" - tarantool-version: "master" - vshard-version: "0.1.26" - fail-fast: false - # Can't install older versions on 22.04, - # see https://github.com/tarantool/setup-tarantool/issues/36 - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 steps: - - uses: actions/checkout@master + - uses: actions/checkout@v4 - name: Setup Tarantool CE (1.x, 2.x) if: ${{ startsWith( matrix.tarantool-version, '1.' ) || startsWith( matrix.tarantool-version, '2.' ) }} - uses: tarantool/setup-tarantool@v2 + uses: tarantool/setup-tarantool@v3 with: tarantool-version: ${{ matrix.tarantool-version }} @@ -67,7 +43,7 @@ jobs: - name: Cache Tarantool 3.x if: ${{ startsWith( matrix.tarantool-version, '3.' ) }} id: cache-v3 - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: "${GITHUB_WORKSPACE}/bin" key: cache-${{ matrix.tarantool-version }} @@ -88,7 +64,7 @@ jobs: - name: Cache Tarantool master if: matrix.tarantool-version == 'master' id: cache-latest - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: "${GITHUB_WORKSPACE}/bin" key: cache-latest-${{ env.LATEST_COMMIT }} @@ -103,18 +79,10 @@ jobs: if: ${{ startsWith( matrix.tarantool-version, '3.' ) || matrix.tarantool-version == 'master' }} run: echo "${GITHUB_WORKSPACE}/bin" >> $GITHUB_PATH - - name: Fix luarocks in Tarantool CE 1.10.6 - if: matrix.tarantool-version == '1.10.6' - run: | - sudo patch -p1 /usr/share/tarantool/luarocks/manif.lua luarocks.patch - - name: Install requirements for community run: | tarantool --version ./deps.sh - env: - CARTRIDGE_VERSION: ${{ matrix.cartridge-version }} - VSHARD_VERSION: ${{ matrix.vshard-version }} - name: Install metrics if: matrix.metrics-version != '' @@ -144,116 +112,30 @@ jobs: run: make -C build coveralls if: ${{ matrix.coveralls }} - run-perf-tests-ce: - if: | - github.event_name == 'push' || - github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name != github.repository - strategy: - matrix: - tarantool-version: ["1.10", "2.11"] - metrics-version: ["1.0.0"] - cartridge-version: ["2.8.0"] - include: - - tarantool-version: "3.0.0" - vshard-version: "0.1.25" - fail-fast: false - runs-on: ubuntu-20.04 - steps: - - uses: actions/checkout@master - - - name: Setup Tarantool CE (1.x, 2.x) - if: ${{ startsWith( matrix.tarantool-version, '1.' ) || startsWith( matrix.tarantool-version, '2.' 
) }} - uses: tarantool/setup-tarantool@v2 - with: - tarantool-version: ${{ matrix.tarantool-version }} - - - name: Setup tt - run: | - curl -L https://tarantool.io/release/2/installer.sh | sudo bash - sudo apt install -y tt - tt version - - - name: Cache Tarantool 3.x - if: ${{ startsWith( matrix.tarantool-version, '3.' ) }} - id: cache-v3 - uses: actions/cache@v3 - with: - path: "${GITHUB_WORKSPACE}/bin" - key: cache-${{ matrix.tarantool-version }} - - - name: Setup Tarantool CE (3.x) - if: ${{ startsWith( matrix.tarantool-version, '3.' ) && steps.cache-latest.outputs.cache-hit != 'true' }} - run: | - tt init - tt install tarantool ${{ matrix.tarantool-version }} - - - name: Get Tarantool master latest commit - if: matrix.tarantool-version == 'master' - run: | - commit_hash=$(git ls-remote https://github.com/tarantool/tarantool.git --branch master | head -c 8) - echo "LATEST_COMMIT=${commit_hash}" >> $GITHUB_ENV - shell: bash - - - name: Cache Tarantool master - if: matrix.tarantool-version == 'master' - id: cache-latest - uses: actions/cache@v3 - with: - path: "${GITHUB_WORKSPACE}/bin" - key: cache-latest-${{ env.LATEST_COMMIT }} - - - name: Setup Tarantool CE (master) - if: matrix.tarantool-version == 'master' && steps.cache-latest.outputs.cache-hit != 'true' - run: | - tt init - tt install tarantool master - - - name: Add tt Tarantool to PATH - if: ${{ startsWith( matrix.tarantool-version, '3.' ) || matrix.tarantool-version == 'master' }} - run: echo "${GITHUB_WORKSPACE}/bin" >> $GITHUB_PATH - - - name: Install requirements for community - run: | - tarantool --version - ./deps.sh - env: - CARTRIDGE_VERSION: ${{ matrix.cartridge-version }} - VSHARD_VERSION: ${{ matrix.vshard-version }} - - - name: Install metrics - if: matrix.metrics-version != '' - run: tt rocks install metrics ${{ matrix.metrics-version }} - - # This server starts and listen on 8084 port that is used for tests - - name: Stop Mono server - run: sudo kill -9 $(sudo lsof -t -i tcp:8084) || true - - - run: cmake -S . -B build - - - name: Run performance tests - run: make -C build performance - run-tests-ee: if: github.event_name == 'push' strategy: + fail-fast: false matrix: - tarantool-version: - - folder: "1.10" - bundle: "tarantool-enterprise-sdk-1.10.15-0-r609" - - folder: "2.11" - bundle: "tarantool-enterprise-sdk-nogc64-2.11.2-0-r609.linux.x86_64" - metrics-version: ["", "1.0.0"] - cartridge-version: ["2.8.0"] include: - tarantool-version: - folder: "3.0" - bundle: "tarantool-enterprise-sdk-gc64-3.0.0-0-gf58f7d82a-r23.linux.x86_64" - vshard-version: "0.1.25" - - fail-fast: false - runs-on: ubuntu-20.04 + folder: "2.10" + bundle: "tarantool-enterprise-sdk-gc64-2.10.8-0-r691.linux.x86_64" + - tarantool-version: + folder: "2.11" + bundle: "tarantool-enterprise-sdk-gc64-2.11.7-0-r691.linux.x86_64" + - tarantool-version: + folder: "3.2" + bundle: "tarantool-enterprise-sdk-gc64-3.2.0-0-r40.linux.x86_64" + - tarantool-version: + folder: "3.3" + bundle: "tarantool-enterprise-sdk-gc64-3.3.2-0-r62.linux.x86_64" + - tarantool-version: + folder: "3.4" + bundle: "tarantool-enterprise-sdk-gc64-3.4.0-0-r62.linux.x86_64" + runs-on: ubuntu-22.04 steps: - - uses: actions/checkout@master + - uses: actions/checkout@v4 - name: Install requirements for enterprise run: | diff --git a/.gitignore b/.gitignore index 5c4f4892..3ebe03f1 100644 --- a/.gitignore +++ b/.gitignore @@ -11,6 +11,9 @@ luacov.stats.out build/*.cmake build/Makefile .idea +sdk +sdk-2 +sdk-3 # Vim Swap files. 
.*.s[a-w][a-z] diff --git a/.luacheckrc b/.luacheckrc index cf34b61f..462c2328 100644 --- a/.luacheckrc +++ b/.luacheckrc @@ -1,6 +1,6 @@ redefined = false globals = {'box', 'utf8', 'checkers', '_TARANTOOL'} include_files = {'**/*.lua', '*.luacheckrc', '*.rockspec'} -exclude_files = {'**/*.rocks/', 'tmp/', 'tarantool-enterprise/'} +exclude_files = {'**/*.rocks/', 'tmp/', 'sdk-*'} max_line_length = 120 max_comment_line_length = 150 diff --git a/.luacov b/.luacov index e4de15e4..40abb4e4 100644 --- a/.luacov +++ b/.luacov @@ -1,4 +1,12 @@ +local ci_node_index = os.getenv("CI_NODE_INDEX") or "" + +statsfile = "luacov.stats" .. ci_node_index .. ".out" exclude = { - '/test/', - '/.rocks/', + "test" +} +include = { + "crud%/.+$", + "crud.lua", + "roles%/.+$", + "cartridge%/.+$", } diff --git a/CHANGELOG.md b/CHANGELOG.md index 04c15224..21393c45 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,10 +7,24 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0. ## [Unreleased] -### Fixed +### Changed +* Bumped metrics version in the rockspec. +* Bumped cartridge version in the rockspec. +* Implemented role-based model support in CRUD (https://jira.vk.team/browse/TNTP-2177). + +### Added +* Validation of `bucket_id`. Invalid values now raise `BucketIDError` + before routing. [TNTP-3536](https://jira.vk.team/browse/TNTP-3536) +### Fixed +* Fixed compatibility with cartridge `2.16.0` ([TNTP-3598](https://jira.vk.team/browse/TNTP-3598)). +* `crud.schema` no longer returns TCF system space `_cdc_state`. * `crud.schema` no longer returns system space `_gc_consumers` with Tarantool 3.2+. +* `crud.schema` no longer returns `tt` system space `_tt_migrations`. * Tests of `schema` with Tarantool 3.2+. +* Fixed error handling in the `call.single` method. +* Added support for working on Tarantool 3.1 with data from 2.11: previously it failed +because replicasets could not be found by name (https://github.com/tarantool/crud-ee/issues/16). ## [1.5.2] - 20-05-24 @@ -22,7 +36,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0. ## [1.5.1] - 27-04-24 -### Added +### Changed * Clarify license in the rockspec (#434). ## [1.5.0] - 03-04-24 diff --git a/LICENSE b/LICENSE index 6c4f181f..0ba6d946 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -Copyright 2020-2024 crud AUTHORS: please see the AUTHORS file. +Copyright 2020-2025 crud AUTHORS: please see the AUTHORS file.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..760b4c04 --- /dev/null +++ b/Makefile @@ -0,0 +1,61 @@ +SHELL := /bin/bash + +S3_TARANTOOL_SDK_3_PATH := s3://packages/enterprise/release/linux/x86_64/3.3/tarantool-enterprise-sdk-gc64-3.3.2-0-r62.linux.x86_64.tar.gz +S3_TARANTOOL_SDK_2_PATH := s3://packages/enterprise/release/linux/x86_64/2.11/tarantool-enterprise-sdk-gc64-2.11.7-0-r691.linux.x86_64.tar.gz +S3_ENDPOINT_URL := $(if $(S3_ENDPOINT_URL),$(S3_ENDPOINT_URL),https://hb.vkcs.cloud) + +.rocks: sdk + source ./sdk-2/env.sh && \ + tt rocks install luacheck 0.26.0 --only-server=sdk-2/rocks && \ + tt rocks install luacov 0.13.0 --only-server=sdk-2/rocks && \ + tt rocks install luacov-reporters 0.1.0 --only-server=sdk-2/rocks && \ + tt rocks install metrics 1.5.0 && \ + tt rocks install cartridge 2.16.3 && \ + tt rocks install migrations 1.1.0 && \ + tt rocks make + +sdk-2: + aws --endpoint-url "$(S3_ENDPOINT_URL)" s3 cp "$(S3_TARANTOOL_SDK_2_PATH)" . + mkdir sdk-2 && tar -xvzf tarantool-enterprise-*.tar.gz -C ./sdk-2 --strip-components=1 && rm tarantool-enterprise-*.tar.gz + +sdk-3: + aws --endpoint-url "$(S3_ENDPOINT_URL)" s3 cp "$(S3_TARANTOOL_SDK_3_PATH)" . + mkdir sdk-3 && tar -xvzf tarantool-enterprise-*.tar.gz -C ./sdk-3 --strip-components=1 && rm tarantool-enterprise-*.tar.gz + +sdk: sdk-2 sdk-3 + source sdk-3/env.sh && \ + cp sdk-2/rocks/luatest-1.0.1-1.all.rock sdk-3/rocks/ && \ + chmod 644 sdk-3/rocks/* && \ + tt rocks make_manifest sdk-3/rocks + +lint: .rocks + source sdk-2/env.sh && .rocks/bin/luacheck . + +.PHONY: test +test: + @if [ -z "$(SDK_TEST)" ]; then \ + echo "Select SDK:"; \ + echo "1) SDK with Tarantool 2.x"; \ + echo "2) SDK with Tarantool 3.x"; \ + read -p "Enter number (1 or 2): " choice; \ + case $$choice in \ + 1) SDK_TEST=sdk-2; SDK_LABEL="SDK with Tarantool 2.x" ;; \ + 2) SDK_TEST=sdk-3; SDK_LABEL="SDK with Tarantool 3.x" ;; \ + *) echo "Invalid selection" >&2; exit 1 ;; \ + esac; \ + else \ + if [ "$(SDK_TEST)" = "sdk-2" ]; then \ + SDK_LABEL="SDK with Tarantool 2.x"; \ + elif [ "$(SDK_TEST)" = "sdk-3" ]; then \ + SDK_LABEL="SDK with Tarantool 3.x"; \ + else \ + SDK_LABEL="Custom SDK ($(SDK_TEST))"; \ + fi; \ + fi; \ + echo "Running tests with $$SDK_LABEL..."; \ + source $$SDK_TEST/env.sh && \ + tt rocks install luatest 1.0.1 --only-server=$$SDK_TEST/rocks && \ + .rocks/bin/luatest -v --coverage test/integration/select_test.lua + +coverage: + source sdk-2/env.sh && ./.rocks/bin/luacov -r summary && cat luacov.report.out diff --git a/README.md b/README.md index 0d0cf6b1..580abfb3 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,6 @@ [![Run static analysis](https://github.com/tarantool/crud/actions/workflows/check_on_push.yaml/badge.svg)](https://github.com/tarantool/crud/actions/workflows/check_on_push.yaml) [![Run tests](https://github.com/tarantool/crud/actions/workflows/test_on_push.yaml/badge.svg)](https://github.com/tarantool/crud/actions/workflows/test_on_push.yaml) -[![Coverage Status](https://coveralls.io/repos/github/tarantool/crud/badge.svg?branch=master)](https://coveralls.io/github/tarantool/crud?branch=master) The `CRUD` module allows to perform CRUD operations on the cluster. 
It also provides the `crud-storage` and `crud-router` roles for @@ -55,6 +54,7 @@ It also provides the `crud-storage` and `crud-router` roles for - [Cartridge roles](#cartridge-roles) - [Usage](#usage-2) - [License](#license) +- [For developers](#for-developers) @@ -66,7 +66,7 @@ First, [install Tarantool](https://www.tarantool.io/en/download). #### Manual install -To try `crud` in your application, you may install it manually fron web +To try `crud` in your application, you may install it manually from web with `tt rocks` rock management tool. ```bash @@ -123,6 +123,25 @@ For Tarantool 1.10, 2.x and 3.x you can also manually call the [crud initialization code](#api) on [VShard](https://github.com/tarantool/vshard) router and storage instances. +> **Note** +> +> Before changing the cluster configuration (for example, adding a new replica set or triggering bucket rebalancing), +> follow these steps to ensure consistent data operations: +> +> 1. Pause all DML operations (insert, update, delete) across your application. +> 2. Apply topology changes or initiate rebalancing. +> 3. Wait until all buckets have finished migrating. +> 4. Clear the route map cache on all routers: +> ```lua +> vshard.router._route_map_clear() +> ``` +> 5. Resume DML operations. +> +> Following these steps ensures correct routing and consistent CRUD behavior after topology updates. +> +> Failure to follow these steps may lead to issues such as duplicated records, +> missing updates, or inconsistent state across replica sets due to incorrect routing during rebalancing. + ### Sandbox The repository provide a simple sandbox application with a test dataset on a single instance. @@ -137,6 +156,12 @@ tarantool> crud.select('developers', nil, {first = 6}) The CRUD operations should be called from router. +Assuming that: + +```lua +local crud = require('crud') +``` + All VShard storages should call `crud.init_storage()` after `vshard.storage.cfg()` (or enable the `roles.crud-storage` role for Tarantool 3 or the `crud-storage` role for Cartridge) @@ -1949,7 +1974,6 @@ issues.
Full configuration example - ```yaml credentials: users: @@ -2034,28 +2058,13 @@ issues. ```lua -- myrouter.lua - - local clock = require('clock') - local fiber = require('fiber') local log = require('log') local vshard = require('vshard') - local TIMEOUT = 60 - local DELAY = 0.1 - - local start = clock.monotonic() - while clock.monotonic() - start < TIMEOUT do - local ok, err = vshard.router.bootstrap({ - if_not_bootstrapped = true, - }) - - if ok then - break - end - - log.info(('Router bootstrap error: %s'):format(err)) - fiber.sleep(DELAY) + local ok, err = vshard.router.bootstrap({timeout = 15}) + if not ok then + log.info(('Router bootstrap error: %s'):format(err)) end ``` diff --git a/crud-scm-1.rockspec b/crud-scm-1.rockspec index d72e540c..30d0c4b0 100644 --- a/crud-scm-1.rockspec +++ b/crud-scm-1.rockspec @@ -11,9 +11,9 @@ description = { dependencies = { 'lua ~> 5.1', - 'checks >= 3.1.0-1', + 'checks >= 3.3.0-1', 'errors >= 2.2.1-1', - 'vshard >= 0.1.18-1', + 'vshard >= 0.1.36-1', } build = { diff --git a/crud/cfg.lua b/crud/cfg.lua index 35e78674..95462c89 100644 --- a/crud/cfg.lua +++ b/crud/cfg.lua @@ -157,7 +157,7 @@ local function __call(self, opts) }) -- Value validation would be performed in stats checks, if required. - opts = table.deepcopy(opts) or {} + opts = table.deepcopy(opts or {}) -- opts from Cartridge clusterwide configuration is read-only, -- but we want to work with copy anyway. setmetatable(opts, {}) diff --git a/crud/common/call.lua b/crud/common/call.lua index 710d8008..9f939978 100644 --- a/crud/common/call.lua +++ b/crud/common/call.lua @@ -1,5 +1,6 @@ local errors = require('errors') +local call_cache = require('crud.common.call_cache') local dev_checks = require('crud.common.dev_checks') local utils = require('crud.common.utils') local sharding_utils = require('crud.common.sharding.utils') @@ -11,8 +12,18 @@ local BasePostprocessor = require('crud.common.map_call_cases.base_postprocessor local CallError = errors.new_class('CallError') +local CALL_FUNC_NAME = 'call_on_storage' +local CRUD_CALL_FUNC_NAME = utils.get_storage_call(CALL_FUNC_NAME) + + local call = {} +local function call_on_storage(run_as_user, func_name, ...) + return box.session.su(run_as_user, call_cache.func_name_to_func(func_name), ...) +end + +call.storage_api = {[CALL_FUNC_NAME] = call_on_storage} + function call.get_vshard_call_name(mode, prefer_replica, balance) dev_checks('string', '?boolean', '?boolean') @@ -71,11 +82,12 @@ local function wrap_vshard_err(vshard_router, err, func_name, replicaset_id, buc )) end -local function retry_call_with_master_discovery(replicaset, method, ...) +local function retry_call_with_master_discovery(replicaset, method, func_name, func_args, call_opts) + local func_args_ext = utils.append_array({ box.session.effective_user(), func_name }, func_args) + -- In case cluster was just bootstrapped with auto master discovery, -- replicaset may miss master. - - local resp, err = replicaset[method](replicaset, ...) + local resp, err = replicaset[method](replicaset, CRUD_CALL_FUNC_NAME, func_args_ext, call_opts) if err == nil then return resp, err @@ -87,7 +99,7 @@ -- Retry only once: should be enough for initial discovery, -- otherwise force user fix up cluster bootstrap. - return replicaset[method](replicaset, ...)
+ return replicaset[method](replicaset, CRUD_CALL_FUNC_NAME, func_args_ext, call_opts) end function call.map(vshard_router, func_name, func_args, opts) @@ -198,7 +210,7 @@ function call.single(vshard_router, bucket_id, func_name, func_args, opts) local replicaset, err = vshard_router:route(bucket_id) if err ~= nil then - return nil, CallError:new("Failed to get router replicaset: %s", err.err) + return nil, CallError:new("Failed to get router replicaset: %s", tostring(err)) end local timeout = opts.timeout or const.DEFAULT_VSHARD_CALL_TIMEOUT diff --git a/crud/common/call_cache.lua b/crud/common/call_cache.lua new file mode 100644 index 00000000..637325ae --- /dev/null +++ b/crud/common/call_cache.lua @@ -0,0 +1,31 @@ +local func_name_to_func_cache = {} + +local function func_name_to_func(func_name) + if func_name_to_func_cache[func_name] then + return func_name_to_func_cache[func_name] + end + + local current = _G + for part in string.gmatch(func_name, "[^%.]+") do + current = rawget(current, part) + if current == nil then + error(("Function '%s' is not registered"):format(func_name)) + end + end + + if type(current) ~= "function" then + error(func_name .. " is not a function") + end + + func_name_to_func_cache[func_name] = current + return current +end + +local function reset() + func_name_to_func_cache = {} +end + +return { + func_name_to_func = func_name_to_func, + reset = reset, +} \ No newline at end of file diff --git a/crud/common/schema.lua b/crud/common/schema.lua index 9583c641..4a2c3c37 100644 --- a/crud/common/schema.lua +++ b/crud/common/schema.lua @@ -217,15 +217,16 @@ function schema.wrap_func_result(space, func, args, opts) opts = opts or {} local ok, func_res = pcall(func, unpack(args)) - if not ok then - result.err = func_res - if opts.add_space_schema_hash then - result.space_schema_hash = get_space_schema_hash(space) - end - else + if ok then if opts.noreturn ~= true then result.res = filter_tuple_fields(func_res, opts.field_names) end + else + result.err = func_res + if opts.add_space_schema_hash then + local _, space_schema_hash = pcall(get_space_schema_hash, space) + result.space_schema_hash = space_schema_hash + end end if opts.fetch_latest_metadata == true then diff --git a/crud/common/sharding/init.lua b/crud/common/sharding/init.lua index 4a96f316..0bee8495 100644 --- a/crud/common/sharding/init.lua +++ b/crud/common/sharding/init.lua @@ -29,9 +29,14 @@ function sharding.get_replicasets_by_bucket_id(vshard_router, bucket_id) end function sharding.key_get_bucket_id(vshard_router, space_name, key, specified_bucket_id) - dev_checks('table', 'string', '?', '?number|cdata') + dev_checks('table', 'string', '?', '?') if specified_bucket_id ~= nil then + local err = sharding.validate_bucket_id(specified_bucket_id) + if err ~= nil then + return nil, err + end + return { bucket_id = specified_bucket_id } end @@ -77,6 +82,15 @@ function sharding.tuple_get_bucket_id(vshard_router, tuple, space, specified_buc } end +function sharding.validate_bucket_id(bucket_id) + if not utils.is_uint(bucket_id) or bucket_id < 1 then + return BucketIDError:new( + "Invalid bucket_id: expected unsigned, got %s", + type(bucket_id) + ) + end +end + function sharding.tuple_set_and_return_bucket_id(vshard_router, tuple, space, specified_bucket_id) local bucket_id_fieldno, err = utils.get_bucket_id_fieldno(space) if err ~= nil then @@ -108,6 +122,11 @@ function sharding.tuple_set_and_return_bucket_id(vshard_router, tuple, space, sp sharding_data.skip_sharding_hash_check = true end + err = 
sharding.validate_bucket_id(sharding_data.bucket_id) + if err ~= nil then + return nil, err + end + return sharding_data end diff --git a/crud/common/sharding/sharding_func.lua b/crud/common/sharding/sharding_func.lua index dbc57a2a..f60c7f2b 100644 --- a/crud/common/sharding/sharding_func.lua +++ b/crud/common/sharding/sharding_func.lua @@ -10,7 +10,7 @@ local ShardingFuncError = errors.new_class('ShardingFuncError', {capture_stack local sharding_func_module = {} local sharding_module_names = { - ['vshard'] = true, + ['vshard'] = 'vshard', } local function is_callable(object) @@ -50,8 +50,9 @@ local function get_function_from_G(func_name) local sharding_module = false local ok - if sharding_module_names[chunks[1]] then - ok, sharding_func = pcall(require, chunks[1]) + if sharding_module_names[chunks[1]] ~= nil then + local module_name = sharding_module_names[chunks[1]] + ok, sharding_func = pcall(require, module_name) if not ok then return nil end diff --git a/crud/common/utils.lua b/crud/common/utils.lua index b2b178a2..5bf0a715 100644 --- a/crud/common/utils.lua +++ b/crud/common/utils.lua @@ -1166,18 +1166,22 @@ function utils.update_storage_call_error_description(err, func_name, replicaset_ return nil end - if (err.type == 'ClientError' or err.type == 'AccessDeniedError') + if (err.type == 'ClientError' or err.type == 'AccessDeniedError' or err.type == 'LuajitError') and type(err.message) == 'string' then local not_defined_str = string.format("Procedure '%s' is not defined", func_name) + local not_registered_str = string.format("Function '%s' is not registered", func_name) local access_denied_str = string.format("Execute access to function '%s' is denied", func_name) - if err.message == not_defined_str or err.message:startswith(access_denied_str) then + if err.message == not_defined_str or err.message:startswith(access_denied_str) + or err.message:find(not_registered_str) + or err.message == "Procedure '_crud.call_on_storage' is not defined" + or err.message:startswith("Execute access to function '_crud.call_on_storage' is denied") then if func_name:startswith('_crud.') then - err = NotInitializedError:new("Function %s is not registered: " .. + err = NotInitializedError:new("Function '%s' is not registered: " .. "crud isn't initialized on replicaset %q or crud module versions mismatch " .. 
"between router and storage", func_name, replicaset_id or "Unknown") else - err = NotInitializedError:new("Function %s is not registered", func_name) + err = NotInitializedError:new("Function '%s' is not registered", func_name) end end end @@ -1324,4 +1328,25 @@ for k, v in pairs(require('crud.common.vshard_utils')) do utils[k] = v end +function utils.append_array(array_src, array_dst) + if not array_dst then + return array_src + end + + table.move(array_dst, 1, #array_dst, #array_src + 1, array_src) + + return array_src +end + +function utils.is_uint(value) + if type(value) == 'number' then + return value >= 0 and math.floor(value) == value + elseif type(value) == 'cdata' then + local ok, casted = pcall(tonumber, value) + return ok and type(casted) == 'number' and casted >= 0 and math.floor(casted) == casted + end + + return false +end + return utils diff --git a/crud/common/vshard_utils.lua b/crud/common/vshard_utils.lua index c0da0ef3..9a7a4ce2 100644 --- a/crud/common/vshard_utils.lua +++ b/crud/common/vshard_utils.lua @@ -4,15 +4,48 @@ local vshard = require('vshard') local vshard_utils = {} +-- get_replicasets returns vshard replicasets from vshard.storage.internal +-- copy pasted from https://github.com/tarantool/vshard/blob/9ad0e2726a5137398f50fe88ac105f53e446c3e2/vshard/storage/init.lua#L3962-L3984 +-- todo: remove after https://github.com/tarantool/vshard/issues/565 closed +local function get_replicasets() + local ireplicasets = {} + local M = vshard.storage.internal + local is_named = M.this_replica.id == M.this_replica.name + for id, replicaset in pairs(M.replicasets) do + local master = replicaset.master + local master_info + if replicaset.is_master_auto then + master_info = 'auto' + elseif not master then + master_info = 'missing' + else + local uri = master:safe_uri() + local conn = master.conn + master_info = { + uri = uri, uuid = conn and conn.peer_uuid, + name = is_named and master.name or nil, + state = conn and conn.state, error = conn and conn.error, + } + end + ireplicasets[id] = { + uuid = replicaset.uuid, + name = is_named and replicaset.name or nil, + master = master_info, + } + end + return ireplicasets +end + function vshard_utils.get_self_vshard_replicaset() - local box_info = box.info() + local box_info = vshard_utils.__get_box_info() - local ok, storage_info = pcall(vshard.storage.info) + local ok, storage_info = vshard_utils.__get_storage_info() assert(ok, 'vshard.storage.cfg() must be called first') - if vshard_utils.get_vshard_identification_mode() == 'name_as_key' then - local replicaset_name = box_info.replicaset.name + local is_needs_upgrade_2_11 = vshard_utils.is_schema_needs_upgrade_from_2_11() + if vshard_utils.get_vshard_identification_mode() == 'name_as_key' and not is_needs_upgrade_2_11 then + local replicaset_name = box_info.replicaset.name return replicaset_name, storage_info.replicasets[replicaset_name] else local replicaset_uuid @@ -22,7 +55,38 @@ function vshard_utils.get_self_vshard_replicaset() replicaset_uuid = box_info.cluster.uuid end - return replicaset_uuid, storage_info.replicasets[replicaset_uuid] + for _, rep in pairs(storage_info.replicasets) do + if rep.uuid == replicaset_uuid then + return replicaset_uuid, rep + end + end + error(('failed to find replicaset by uuid %s'):format(replicaset_uuid)) + end +end + +-- for unit tests +function vshard_utils.__get_storage_info() + -- cartridge disable vshard.storage on the very first apply_config + -- here we check this and do not call vshard.storage.info + -- todo: remove after 
https://github.com/tarantool/vshard/issues/565 closed + if vshard.storage.internal.is_enabled == false then + return true, { + replicasets = get_replicasets(), + } + end + return pcall(vshard.storage.info) +end + +-- for unit tests +function vshard_utils.__get_box_info() + return box.info() +end + +function vshard_utils.is_schema_needs_upgrade_from_2_11() + local version_tup = box.space._schema:get({'version'}) + local version_str = ("%s.%s"):format(version_tup[2], version_tup[3]) + if version_str == "2.11" and box.internal.schema_needs_upgrade() then + return true end end diff --git a/crud/compare/plan.lua b/crud/compare/plan.lua index ffe45596..e180a596 100644 --- a/crud/compare/plan.lua +++ b/crud/compare/plan.lua @@ -168,7 +168,7 @@ function plan.new(space, conditions, opts) field_names = '?table', force_map_call = '?boolean', sharding_key_as_index_obj = '?table', - bucket_id = '?number|cdata', + bucket_id = '?', }) conditions = conditions ~= nil and conditions or {} diff --git a/crud/count.lua b/crud/count.lua index 899f83b8..c641339a 100644 --- a/crud/count.lua +++ b/crud/count.lua @@ -51,21 +51,21 @@ local function count_on_storage(space_name, index_id, conditions, opts) end local value = opts.scan_value - - local filter_func, err = filters.gen_func(space, index, conditions, { - tarantool_iter = opts.tarantool_iter, - scan_condition_num = opts.scan_condition_num, - }) - if err ~= nil then - return nil, CountError:new("Failed to generate tuples filter: %s", err) - end - + local filter_func local tuples_count = 0 local looked_up_tuples = 0 for _, tuple in index:pairs(value, {iterator = opts.tarantool_iter}) do if tuple == nil then break + elseif not filter_func then + filter_func, err = filters.gen_func(space, index, conditions, { + tarantool_iter = opts.tarantool_iter, + scan_condition_num = opts.scan_condition_num, + }) + if err ~= nil then + return nil, CountError:new("Failed to generate tuples filter: %s", err) + end end looked_up_tuples = looked_up_tuples + 1 @@ -110,7 +110,7 @@ end local function call_count_on_router(vshard_router, space_name, user_conditions, opts) checks('table', 'string', '?table', { timeout = '?number', - bucket_id = '?number|cdata', + bucket_id = '?', force_map_call = '?boolean', fullscan = '?boolean', yield_every = '?number', @@ -317,7 +317,7 @@ end function count.call(space_name, user_conditions, opts) checks('string', '?table', { timeout = '?number', - bucket_id = '?number|cdata', + bucket_id = '?', force_map_call = '?boolean', fullscan = '?boolean', yield_every = '?number', diff --git a/crud/delete.lua b/crud/delete.lua index cb4d5cee..3740aa8f 100644 --- a/crud/delete.lua +++ b/crud/delete.lua @@ -60,7 +60,7 @@ delete.storage_api = {[DELETE_FUNC_NAME] = delete_on_storage} local function call_delete_on_router(vshard_router, space_name, key, opts) dev_checks('table', 'string', '?', { timeout = '?number', - bucket_id = '?number|cdata', + bucket_id = '?', fields = '?table', vshard_router = '?string|table', noreturn = '?boolean', @@ -194,7 +194,7 @@ end function delete.call(space_name, key, opts) checks('string', '?', { timeout = '?number', - bucket_id = '?number|cdata', + bucket_id = '?', fields = '?table', vshard_router = '?string|table', noreturn = '?boolean', diff --git a/crud/get.lua b/crud/get.lua index 4d4ffc96..d6844527 100644 --- a/crud/get.lua +++ b/crud/get.lua @@ -58,7 +58,7 @@ get.storage_api = {[GET_FUNC_NAME] = get_on_storage} local function call_get_on_router(vshard_router, space_name, key, opts) dev_checks('table', 'string', '?', { timeout 
= '?number', - bucket_id = '?number|cdata', + bucket_id = '?', fields = '?table', prefer_replica = '?boolean', balance = '?boolean', @@ -196,7 +196,7 @@ end function get.call(space_name, key, opts) checks('string', '?', { timeout = '?number', - bucket_id = '?number|cdata', + bucket_id = '?', fields = '?table', prefer_replica = '?boolean', balance = '?boolean', diff --git a/crud/insert.lua b/crud/insert.lua index d8b848a0..ebb3f865 100644 --- a/crud/insert.lua +++ b/crud/insert.lua @@ -61,7 +61,7 @@ insert.storage_api = {[INSERT_FUNC_NAME] = insert_on_storage} local function call_insert_on_router(vshard_router, space_name, original_tuple, opts) dev_checks('table', 'string', 'table', { timeout = '?number', - bucket_id = '?number|cdata', + bucket_id = '?', add_space_schema_hash = '?boolean', fields = '?table', vshard_router = '?string|table', @@ -175,7 +175,7 @@ end function insert.tuple(space_name, tuple, opts) checks('string', 'table', { timeout = '?number', - bucket_id = '?number|cdata', + bucket_id = '?', add_space_schema_hash = '?boolean', fields = '?table', vshard_router = '?string|table', @@ -214,7 +214,7 @@ end function insert.object(space_name, obj, opts) checks('string', 'table', { timeout = '?number', - bucket_id = '?number|cdata', + bucket_id = '?', add_space_schema_hash = '?boolean', fields = '?table', vshard_router = '?string|table', diff --git a/crud/len.lua b/crud/len.lua index a5dbe5d1..e33d7f60 100644 --- a/crud/len.lua +++ b/crud/len.lua @@ -1,6 +1,7 @@ local checks = require('checks') local errors = require('errors') +local call = require('crud.common.call') local utils = require('crud.common.utils') local dev_checks = require('crud.common.dev_checks') @@ -60,7 +61,10 @@ function len.call(space_name, opts) return nil, LenError:new("Space %q doesn't exist", space_name) end - local results, err = vshard_router:map_callrw(CRUD_LEN_FUNC_NAME, {space_name}, opts) + local results, err = call.map(vshard_router, CRUD_LEN_FUNC_NAME, {space_name}, { + mode = 'write', + timeout = opts.timeout, + }) if err ~= nil then return nil, LenError:new("Failed to call len on storage-side: %s", err) diff --git a/crud/readview.lua b/crud/readview.lua index de4e7a81..40d9a976 100644 --- a/crud/readview.lua +++ b/crud/readview.lua @@ -2,6 +2,7 @@ local fiber = require('fiber') local checks = require('checks') local errors = require('errors') +local call = require('crud.common.call') local const = require('crud.common.const') local stash = require('crud.common.stash') local utils = require('crud.common.utils') @@ -334,8 +335,15 @@ function Readview_obj.create(vshard_router, opts) setmetatable(readview, Readview_obj) readview._name = opts.name - local results, err, err_id = vshard_router:map_callrw(CRUD_OPEN_FUNC_NAME, - {readview._name}, {timeout = opts.timeout}) + local results, err, err_id = call.map( + vshard_router, + CRUD_OPEN_FUNC_NAME, + {readview._name}, + { + mode = "write", + timeout = opts.timeout, + } + ) if err ~= nil then return nil, ReadviewError:new( "Failed to call readview_open_on_storage on storage-side: storage id: %s err: %s", diff --git a/crud/replace.lua b/crud/replace.lua index 73c7b8a1..5e36906f 100644 --- a/crud/replace.lua +++ b/crud/replace.lua @@ -61,7 +61,7 @@ replace.storage_api = {[REPLACE_FUNC_NAME] = replace_on_storage} local function call_replace_on_router(vshard_router, space_name, original_tuple, opts) dev_checks('table', 'string', 'table', { timeout = '?number', - bucket_id = '?number|cdata', + bucket_id = '?', add_space_schema_hash = '?boolean', fields = 
'?table', vshard_router = '?string|table', @@ -174,7 +174,7 @@ end function replace.tuple(space_name, tuple, opts) checks('string', 'table', { timeout = '?number', - bucket_id = '?number|cdata', + bucket_id = '?', add_space_schema_hash = '?boolean', fields = '?table', vshard_router = '?string|table', @@ -213,7 +213,7 @@ end function replace.object(space_name, obj, opts) checks('string', 'table', { timeout = '?number', - bucket_id = '?number|cdata', + bucket_id = '?', add_space_schema_hash = '?boolean', fields = '?table', vshard_router = '?string|table', diff --git a/crud/schema.lua b/crud/schema.lua index b3d3346d..57743ba9 100644 --- a/crud/schema.lua +++ b/crud/schema.lua @@ -42,6 +42,10 @@ schema.system_spaces = { -- https://github.com/tarantool/ddl/blob/b55d0ff7409f32e4d527e2d25444d883bce4163b/test/set_sharding_metadata_test.lua#L92-L98 ['_ddl_sharding_key'] = true, ['_ddl_sharding_func'] = true, + -- https://github.com/tarantool/tt-ee/blob/6045cd6f4f9b10fbba7e2c6abeecb8f856fee9b0/lib/migrations/internal/eval/body/lua/status_api.lua + ['_tt_migrations'] = true, + -- https://github.com/tarantool/cluster-federation/blob/01738cafa0dc7a3138e64f93c4e84cb323653257/src/internal/utils/utils.go#L17 + ['_cdc_state'] = true, } local function get_crud_schema(space) diff --git a/crud/select/compat/select.lua b/crud/select/compat/select.lua index abd40c19..ac8c3851 100644 --- a/crud/select/compat/select.lua +++ b/crud/select/compat/select.lua @@ -25,7 +25,7 @@ local function build_select_iterator(vshard_router, space_name, user_conditions, after = '?table|cdata', first = '?number', batch_size = '?number', - bucket_id = '?number|cdata', + bucket_id = '?', force_map_call = '?boolean', field_names = '?table', yield_every = '?number', @@ -205,7 +205,7 @@ function select_module.pairs(space_name, user_conditions, opts) first = '?number', batch_size = '?number', use_tomap = '?boolean', - bucket_id = '?number|cdata', + bucket_id = '?', force_map_call = '?boolean', fields = '?table', fetch_latest_metadata = '?boolean', @@ -224,6 +224,13 @@ function select_module.pairs(space_name, user_conditions, opts) opts = opts or {} + if opts.bucket_id ~= nil then + local err = sharding.validate_bucket_id(opts.bucket_id) + if err ~= nil then + return error(err) + end + end + if opts.readview == true then if opts.mode ~= nil then return nil, SelectError:new("Readview does not support 'mode' option") @@ -321,7 +328,7 @@ local function select_module_call_xc(vshard_router, space_name, user_conditions, after = '?table|cdata', first = '?number', batch_size = '?number', - bucket_id = '?number|cdata', + bucket_id = '?', force_map_call = '?boolean', fields = '?table', fullscan = '?boolean', diff --git a/crud/select/compat/select_old.lua b/crud/select/compat/select_old.lua index 009f6dce..9f78a0e2 100644 --- a/crud/select/compat/select_old.lua +++ b/crud/select/compat/select_old.lua @@ -91,7 +91,7 @@ local function build_select_iterator(vshard_router, space_name, user_conditions, after = '?table', first = '?number', batch_size = '?number', - bucket_id = '?number|cdata', + bucket_id = '?', force_map_call = '?boolean', field_names = '?table', yield_every = '?number', @@ -230,7 +230,7 @@ function select_module.pairs(space_name, user_conditions, opts) first = '?number', batch_size = '?number', use_tomap = '?boolean', - bucket_id = '?number|cdata', + bucket_id = '?', force_map_call = '?boolean', fields = '?table', fetch_latest_metadata = '?boolean', @@ -247,6 +247,13 @@ function select_module.pairs(space_name, user_conditions, opts) 
opts = opts or {} + if opts.bucket_id ~= nil then + local err = sharding.validate_bucket_id(opts.bucket_id) + if err ~= nil then + return nil, SelectError:new(err) + end + end + if opts.first ~= nil and opts.first < 0 then error(string.format("Negative first isn't allowed for pairs")) end @@ -422,7 +429,7 @@ function select_module.call(space_name, user_conditions, opts) after = '?table', first = '?number', batch_size = '?number', - bucket_id = '?number|cdata', + bucket_id = '?', force_map_call = '?boolean', fields = '?table', fullscan = '?boolean', diff --git a/crud/select/merger.lua b/crud/select/merger.lua index b8e358e1..1fd75320 100644 --- a/crud/select/merger.lua +++ b/crud/select/merger.lua @@ -12,7 +12,7 @@ local merger_lib = compat.require('tuple.merger', 'merger') local Keydef = require('crud.compare.keydef') local stats = require('crud.stats') -local utils = require("crud.common.utils") +local utils = require('crud.common.utils') local function bswap_u16(num) return bit.rshift(bit.bswap(tonumber(num)), 16) @@ -171,11 +171,14 @@ local function fetch_chunk(context, state) -- change context.func_args too, but it does not matter next_func_args[4].after_tuple = cursor.after_tuple + local func_args_ext = utils.append_array({ box.session.effective_user(), func_name }, next_func_args) if context.readview then - next_state = {future = context.future_replica.conn:call(func_name, next_func_args, net_box_opts)} + next_state = {future = context.future_replica.conn:call("_crud.call_on_storage", + func_args_ext, net_box_opts)} else - local next_future = replicaset[vshard_call_name](replicaset, func_name, next_func_args, net_box_opts) + local next_future = replicaset[vshard_call_name](replicaset, "_crud.call_on_storage", + func_args_ext, net_box_opts) next_state = {future = next_future} end return next_state, buf @@ -200,8 +203,9 @@ local function new(vshard_router, replicasets, space, index_id, func_name, func_ local buf = buffer.ibuf() local net_box_opts = {is_async = true, buffer = buf, skip_header = utils.tarantool_supports_netbox_skip_header_option() or nil} - local future = replicaset[vshard_call_name](replicaset, func_name, func_args, - net_box_opts) + local func_args_ext = utils.append_array({ box.session.effective_user(), func_name }, func_args) + local future = replicaset[vshard_call_name](replicaset, "_crud.call_on_storage", + func_args_ext, net_box_opts) -- Create a source. local context = { @@ -275,7 +279,8 @@ local function new_readview(vshard_router, replicasets, readview_info, space, in local net_box_opts = {is_async = true, buffer = buf, skip_header = utils.tarantool_supports_netbox_skip_header_option() or nil} func_args[4].readview_id = replicaset_info.id - local future = replica.conn:call(func_name, func_args, net_box_opts) + local func_args_ext = utils.append_array({ box.session.effective_user(), func_name }, func_args) + local future = replica.conn:call("_crud.call_on_storage", func_args_ext, net_box_opts) -- Create a source. 
local context = { diff --git a/crud/stats/init.lua b/crud/stats/init.lua index 50832231..cd7b65a6 100644 --- a/crud/stats/init.lua +++ b/crud/stats/init.lua @@ -130,7 +130,7 @@ function stats.enable(opts) 'Can be enabled only on crud router' ) - opts = table.deepcopy(opts) or {} + opts = table.deepcopy(opts or {}) if opts.driver == nil then opts.driver = stats.get_default_driver() end diff --git a/crud/stats/metrics_registry.lua b/crud/stats/metrics_registry.lua index 315a649d..cae5106f 100644 --- a/crud/stats/metrics_registry.lua +++ b/crud/stats/metrics_registry.lua @@ -375,4 +375,4 @@ end workaround_role_reload() -return registry \ No newline at end of file +return registry diff --git a/crud/storage.lua b/crud/storage.lua index b00b1386..0b8ef770 100644 --- a/crud/storage.lua +++ b/crud/storage.lua @@ -4,6 +4,7 @@ local dev_checks = require('crud.common.dev_checks') local stash = require('crud.common.stash') local utils = require('crud.common.utils') +local call = require('crud.common.call') local sharding_metadata = require('crud.common.sharding.sharding_metadata') local insert = require('crud.insert') local insert_many = require('crud.insert_many') @@ -61,6 +62,7 @@ local function init_storage_call(user, storage_api) end local modules_with_storage_api = { + call, sharding_metadata, insert, insert_many, diff --git a/crud/update.lua b/crud/update.lua index 6e88e3c4..a5efdd53 100644 --- a/crud/update.lua +++ b/crud/update.lua @@ -85,7 +85,7 @@ update.storage_api = {[UPDATE_FUNC_NAME] = update_on_storage} local function call_update_on_router(vshard_router, space_name, key, user_operations, opts) dev_checks('table', 'string', '?', 'table', { timeout = '?number', - bucket_id = '?number|cdata', + bucket_id = '?', fields = '?table', vshard_router = '?string|table', noreturn = '?boolean', @@ -230,7 +230,7 @@ end function update.call(space_name, key, user_operations, opts) checks('string', '?', 'table', { timeout = '?number', - bucket_id = '?number|cdata', + bucket_id = '?', fields = '?table', vshard_router = '?string|table', noreturn = '?boolean', diff --git a/crud/upsert.lua b/crud/upsert.lua index 3ddc94e5..5be7bc4a 100644 --- a/crud/upsert.lua +++ b/crud/upsert.lua @@ -58,7 +58,7 @@ upsert.storage_api = {[UPSERT_FUNC_NAME] = upsert_on_storage} local function call_upsert_on_router(vshard_router, space_name, original_tuple, user_operations, opts) dev_checks('table', 'string', '?', 'table', { timeout = '?number', - bucket_id = '?number|cdata', + bucket_id = '?', add_space_schema_hash = '?boolean', fields = '?table', vshard_router = '?string|table', @@ -183,7 +183,7 @@ end function upsert.tuple(space_name, tuple, user_operations, opts) checks('string', '?', 'table', { timeout = '?number', - bucket_id = '?number|cdata', + bucket_id = '?', add_space_schema_hash = '?boolean', fields = '?table', vshard_router = '?string|table', @@ -226,7 +226,7 @@ end function upsert.object(space_name, obj, user_operations, opts) checks('string', 'table', 'table', { timeout = '?number', - bucket_id = '?number|cdata', + bucket_id = '?', add_space_schema_hash = '?boolean', fields = '?table', vshard_router = '?string|table', diff --git a/crud/version.lua b/crud/version.lua index eeb6c5ca..8c84ab46 100644 --- a/crud/version.lua +++ b/crud/version.lua @@ -1,4 +1,4 @@ -- Сontains the module version. -- Requires manual update in case of release commit. 
-return '1.5.2' +return '1.6.0' diff --git a/deps.sh b/deps.sh index 06025176..a81155da 100755 --- a/deps.sh +++ b/deps.sh @@ -11,9 +11,10 @@ then fi # Test dependencies: -$TTCTL rocks install luatest 1.0.1 -$TTCTL rocks install luacheck 0.25.0 -$TTCTL rocks install luacov 0.13.0 +${TTCTL} rocks install luatest 1.0.1 +${TTCTL} rocks install luacheck 0.26.0 +${TTCTL} rocks install luacov 0.13.0 +${TTCTL} rocks install luacov-reporters 0.1.0 # cluacov, luacov-coveralls and dependencies $TTCTL rocks install https://raw.githubusercontent.com/luarocks/cluacov/master/cluacov-dev-1.rockspec @@ -33,15 +34,16 @@ $TTCTL rocks install "${LUACOV_COVERALLS_ROCKSPEC_FILE}" rm "${LUACOV_COVERALLS_ROCKSPEC_FILE}" rmdir "${TMPDIR}" -if [[ -n "$CARTRIDGE_VERSION" ]] -then - $TTCTL rocks install cartridge "$CARTRIDGE_VERSION" - $TTCTL rocks install migrations 0.4.2 -else - VSHARD_VERSION="${VSHARD_VERSION:-0.1.24}" - $TTCTL rocks install vshard "$VSHARD_VERSION" -fi +CARTRIDGE_VERSION="${CARTRIDGE_VERSION:-2.16.3}" +VSHARD_VERSION="${VSHARD_VERSION:-0.1.36}" +METRICS_VERSION="${METRICS_VERSION:-1.5.0}" +DDL_VERSION="${DDL_VERSION:-1.7.1}" +MIGRATIONS_VERSION="${MIGRATIONS_VERSION:-1.1.0}" -$TTCTL rocks install ddl 1.6.2 +${TTCTL} rocks install cartridge "${CARTRIDGE_VERSION}" +${TTCTL} rocks install vshard "${VSHARD_VERSION}" +${TTCTL} rocks install metrics "${METRICS_VERSION}" +${TTCTL} rocks install ddl "${DDL_VERSION}" +${TTCTL} rocks install migrations "${MIGRATIONS_VERSION}" -$TTCTL rocks make +${TTCTL} rocks make \ No newline at end of file diff --git a/luarocks.patch b/luarocks.patch deleted file mode 100644 index 5c0a9d41..00000000 --- a/luarocks.patch +++ /dev/null @@ -1,20 +0,0 @@ -diff --git a/src/luarocks/manif.lua b/src/luarocks/manif.lua -index 34ae02da5..79a427819 100644 ---- a/src/luarocks/manif.lua -+++ b/src/luarocks/manif.lua -@@ -444,11 +444,10 @@ function manif.add_to_manifest(name, version, repo, deps_mode) - - local manifest, err = manif_core.load_local_manifest(rocks_dir) - if not manifest then -- util.printerr("No existing manifest. Attempting to rebuild...") -- -- Manifest built by `manif.make_manifest` should already -- -- include information about given name and version, -- -- no need to update it. -- return manif.make_manifest(rocks_dir, deps_mode) -+ util.printerr("No existing manifest. Creating an empty one...") -+ -- Create an empty manifest. -+ manifest, err = { repository = {}, modules = {}, commands = {} }, nil -+ manif_core.cache_manifest(rocks_dir, nil, manifest) - end - - local results = {[name] = {[version] = {{arch = "installed", repo = rocks_dir}}}} diff --git a/test/entrypoint/srv_not_initialized/cartridge_init.lua b/test/entrypoint/srv_not_initialized/cartridge_init.lua index 372cab45..943e3bcf 100755 --- a/test/entrypoint/srv_not_initialized/cartridge_init.lua +++ b/test/entrypoint/srv_not_initialized/cartridge_init.lua @@ -11,9 +11,12 @@ local cartridge = require('cartridge') if package.setsearchroot ~= nil then package.setsearchroot() else - package.path = package.path .. debug.sourcedir() .. "/?.lua;" + package.path = package.path .. debug.sourcedir() .. "/?.lua;" .. debug.sourcedir() .. "/?/init.lua;" end +local root = fio.dirname(fio.dirname(fio.dirname(debug.sourcedir()))) +package.path = package.path .. root .. "/?.lua;" .. root .. "/?/init.lua;" + local root = fio.dirname(fio.dirname(fio.dirname(debug.sourcedir()))) package.path = package.path .. root ..
"/?.lua;" diff --git a/test/helper.lua b/test/helper.lua index 394b3584..a33e89b1 100644 --- a/test/helper.lua +++ b/test/helper.lua @@ -139,6 +139,7 @@ function helpers.box_cfg(opts) box.cfg({ memtx_dir = tempdir, wal_mode = 'none', + listen = opts.listen, }) fio.rmtree(tempdir) @@ -328,6 +329,12 @@ function helpers.call_on_servers(cluster, aliases, func) end end +function helpers.exec_on_cluster(cluster, func, ...) + for _, server in ipairs(cluster.servers) do + server:exec(func, ...) + end +end + -- Call given function for each server with the 'crud-storage' -- role. -- @@ -760,8 +767,16 @@ function helpers.get_command_log(router, call, args) require('log').error("crud fflush message") ]]) local captured = "" + local start_time = fiber.time() + local timeout = 2.0 + while not string.find(captured, "crud fflush message", 1, true) do captured = captured .. (capture:read() or "") + if fiber.time() - start_time > timeout then + capture:close() + t.skip() + end + fiber.sleep(0.01) end capture:close() @@ -942,6 +957,7 @@ function helpers.start_cluster(g, cartridge_cfg, vshard_cfg, tarantool3_cluster_ helpers.start_vshard_cluster(g, vshard_cfg) elseif opts.backend == helpers.backend.CONFIG then helpers.skip_if_tarantool3_crud_roles_unsupported() + helpers.skip_tarantool3_vshard_unsupported() helpers.start_tarantool3_cluster(g, tarantool3_cluster_cfg) end @@ -1507,8 +1523,22 @@ function helpers.skip_if_tarantool3_crud_roles_unsupported() ("Tarantool %s does not support crud roles"):format(version)) end +function helpers.skip_tarantool3_vshard_unsupported() + t.skip("vshard is not supported in config yet: " .. + "https://github.com/tarantool/tarantool-ee/issues/815") +end + function helpers.skip_if_not_config_backend(backend) t.skip_if(backend ~= helpers.backend.CONFIG, "The test is for Tarantool 3 with config only") end +function helpers.reset_call_cache(cluster) + helpers.call_on_storages(cluster, function(server) + server:exec(function() + local call_cache = require('crud.common.call_cache') + call_cache.reset() + end) + end) +end + return helpers diff --git a/test/integration/cfg_test.lua b/test/integration/cfg_test.lua index 5a7d4c96..41d7785f 100644 --- a/test/integration/cfg_test.lua +++ b/test/integration/cfg_test.lua @@ -157,7 +157,7 @@ group.test_role_cfg = function(g) stats_quantile_max_age_time = 180, } - g.router:upload_config({crud = cfg}) + g.router:upload_config({["crud"] = cfg}) local actual_cfg = g.router:eval("return require('crud').cfg") t.assert_equals(cfg, actual_cfg) @@ -172,7 +172,7 @@ group.test_role_partial_cfg = function(g) local cfg_after = table.deepcopy(cfg_before) cfg_after.stats = not cfg_before.stats - g.router:upload_config({crud = {stats = cfg_after.stats}}) + g.router:upload_config({["crud"] = {stats = cfg_after.stats}}) local actual_cfg = g.router:eval("return require('crud').cfg") t.assert_equals(cfg_after, actual_cfg, "Only requested field were updated") @@ -212,7 +212,7 @@ for name, case in pairs(role_cfg_error_cases) do helpers.skip_not_cartridge_backend(g.params.backend) local success, error = pcall(function() g.router:upload_config({ - crud = case.args, + ["crud"] = case.args, }) end) diff --git a/test/integration/count_test.lua b/test/integration/count_test.lua index b08196f8..2f1a93db 100644 --- a/test/integration/count_test.lua +++ b/test/integration/count_test.lua @@ -28,6 +28,7 @@ pgroup.before_each(function(g) helpers.truncate_space_on_cluster(g.cluster, 'customers') helpers.truncate_space_on_cluster(g.cluster, 'coord') 
helpers.truncate_space_on_cluster(g.cluster, 'book_translation') + helpers.truncate_space_on_cluster(g.cluster, 'interval') end) pgroup.test_count_non_existent_space = function(g) @@ -913,3 +914,32 @@ for case_name_template, case in pairs(read_scenario.gh_422_nullability_cases) do case(g, read_impl) end end + +pgroup.test_invalid_bucket_id_in_opts = function(g) + local invalid_values = { + "string", + {}, + true, + -1, + } + + for _, bucket_id in ipairs(invalid_values) do + local expected_err = string.format( + "Invalid bucket_id: expected unsigned, got %s", + type(bucket_id) + ) + + local result, err = g.router:call('crud.count', { + 'customers', + nil, + { + mode = 'write', + bucket_id = bucket_id, + fullscan = true, + } + }) + + t.assert_equals(result, nil) + t.assert_str_contains(err.err or err.str, expected_err) + end +end diff --git a/test/integration/custom_bucket_id_test.lua b/test/integration/custom_bucket_id_test.lua index a3d32b9b..74c6dc4c 100644 --- a/test/integration/custom_bucket_id_test.lua +++ b/test/integration/custom_bucket_id_test.lua @@ -555,3 +555,17 @@ pgroup.test_select = function(g) -- tuple is found t.assert_equals(#result.rows, 1) end + +pgroup.test_non_existent_bucket_id = function(g) + local _, err = g.router:call('crud.insert', { + 'customers', {1, box.NULL, 'Maria', 23}, {bucket_id = 999999} + }) + t.assert_equals(type(err), 'table') + t.assert_equals(err.class_name, 'InsertError') + t.assert_str_contains( + err.err, + 'Failed to call insert on storage-side: CallError: Failed to get router replicaset: ' + .. '{"bucket_id":999999,"code":9,"type":"ShardingError","message":"Bucket 999999 cannot' + .. ' be found. Is rebalancing in progress?","name":"NO_ROUTE_TO_BUCKET"}' + ) +end diff --git a/test/integration/pairs_readview_test.lua b/test/integration/pairs_readview_test.lua index df0a7144..5d81d8b2 100644 --- a/test/integration/pairs_readview_test.lua +++ b/test/integration/pairs_readview_test.lua @@ -30,7 +30,9 @@ pgroup.after_all(function(g) end) pgroup.before_each(function(g) + helpers.reset_call_cache(g.cluster) helpers.truncate_space_on_cluster(g.cluster, 'customers') + helpers.truncate_space_on_cluster(g.cluster, 'interval') end) @@ -944,3 +946,37 @@ for case_name_template, case in pairs(read_scenario.gh_422_nullability_cases) do case(g, read_impl) end end + +pgroup.test_invalid_bucket_id_in_readview_pairs = function(g) + local invalid_opts_list = { + {bucket_id = "string"}, + {bucket_id = {}}, + {bucket_id = true}, + {bucket_id = -1}, + } + + for _, opts in ipairs(invalid_opts_list) do + local expected_err = string.format( + "Invalid bucket_id: expected unsigned, got %s", + type(opts.bucket_id) + ) + local _, err = g.router:eval([[ + local crud = require('crud') + + local rv, err = crud.readview({name = 'foo'}) + if err ~= nil then + return nil, err + end + + local opts = ... 
+ local _, err = pcall(function() + for _, _ in rv:pairs('customers', nil, opts) do end + end) + + rv:close() + return nil, err + ]], {opts}) + + t.assert_str_contains(err.err, expected_err) + end +end diff --git a/test/integration/pairs_test.lua b/test/integration/pairs_test.lua index 1744c490..f87a9322 100644 --- a/test/integration/pairs_test.lua +++ b/test/integration/pairs_test.lua @@ -24,7 +24,9 @@ pgroup.after_all(function(g) end) pgroup.before_each(function(g) + helpers.reset_call_cache(g.cluster) helpers.truncate_space_on_cluster(g.cluster, 'customers') + helpers.truncate_space_on_cluster(g.cluster, 'interval') end) @@ -951,3 +953,25 @@ for case_name_template, case in pairs(read_scenario.gh_422_nullability_cases) do case(g, read_impl) end end + +pgroup.test_invalid_bucket_id_pairs = function(g) + local invalid_opts_list = { + {bucket_id = "string"}, + {bucket_id = {}}, + {bucket_id = true}, + {bucket_id = -1}, + } + + for _, opts in ipairs(invalid_opts_list) do + local expected_err = string.format( + "Invalid bucket_id: expected unsigned, got %s", + type(opts.bucket_id) + ) + t.assert_error_msg_contains(expected_err, function() + g.router:eval([[ + local crud = require('crud') + crud.pairs('customers', nil, ...) + ]], {opts}) + end) + end +end diff --git a/test/integration/privileges_test.lua b/test/integration/privileges_test.lua new file mode 100644 index 00000000..74816417 --- /dev/null +++ b/test/integration/privileges_test.lua @@ -0,0 +1,606 @@ +local t = require('luatest') + +local helpers = require('test.helper') +local net_box = require('net.box') +local group_of_tests = t.group(nil, { + { + backend = helpers.backend.VSHARD, + backend_cfg = nil, + space_access_granted = true, + }, + { + backend = helpers.backend.VSHARD, + backend_cfg = nil, + space_access_granted = false, + }, +}) + +local ORIGINAL_ROWS = { + { id = 1, name = "Elizabeth", last_name = "Jackson", age = 12, city = "New York", }, + { id = 2, name = "Mary", last_name = "Brown", age = 46, city = "Los Angeles", }, + { id = 3, name = "David", last_name = "Smith", age = 33, city = "Los Angeles", }, + { id = 4, name = "William", last_name = "White", age = 81, city = "Chicago", }, + { id = 5, name = "James", last_name = "Johnson", age = 29, city = "Houston", }, + { id = 6, name = "Patricia", last_name = "Miller", age = 54, city = "Phoenix", }, + { id = 7, name = "Robert", last_name = "Davis", age = 40, city = "Philadelphia", }, + { id = 8, name = "Jennifer", last_name = "Garcia", age = 25, city = "San Antonio", }, + { id = 9, name = "Michael", last_name = "Martinez", age = 37, city = "San Diego", }, + { id = 10, name = "Linda", last_name = "Hernandez", age = 62, city = "Dallas", }, + { id = 11, name = "Charles", last_name = "Lopez", age = 50, city = "San Jose", }, + { id = 12, name = "Barbara", last_name = "Gonzalez", age = 45, city = "Austin", }, + { id = 13, name = "Joseph", last_name = "Wilson", age = 34, city = "Jacksonville", }, + { id = 14, name = "Susan", last_name = "Anderson", age = 28, city = "Fort Worth", }, + { id = 15, name = "Thomas", last_name = "Thomas", age = 70, city = "Columbus", }, + { id = 16, name = "Jessica", last_name = "Taylor", age = 31, city = "Charlotte", }, +} + +group_of_tests.before_all(function(g) + if (not helpers.tarantool_version_at_least(2, 11, 0)) + or (not require('luatest.tarantool').is_enterprise_package()) then + t.skip('Readview is supported only for Tarantool Enterprise starting from v2.11.0') + end + helpers.start_default_cluster(g, 'srv_select') + + g.space_format = 
g.cluster:server('s1-master').net_box.space.customers:format() +end) + +group_of_tests.after_all(function(g) + helpers.stop_cluster(g.cluster, g.params.backend) +end) + +group_of_tests.before_each(function(g) + helpers.truncate_space_on_cluster(g.cluster, 'customers') +end) + +local function privilegies_test_base_init(g, access_operation_type) + helpers.insert_objects(g, 'customers', ORIGINAL_ROWS) + helpers.exec_on_cluster(g.cluster, function(access_operation_type) + if not box.cfg.read_only then + local user = box.session.effective_user() + box.session.su('admin') + + box.schema.func.create('read_view_select', { + language = 'LUA', + if_not_exists = true, + body = [[ + function() + local rv = crud.readview() + local result, err = rv:select("customers") + rv:close() + + return result, err + end + ]] + }) + + box.schema.func.create('read_view_pairs', { + language = 'LUA', + if_not_exists = true, + body = [[ + function() + local rv = crud.readview() + local rows = {} + for _, row in rv:pairs('customers', {{'<=', 'age', 35}}, {use_tomap = true}) do + table.insert(rows, row) + end + rv:close() + + return rows + end + ]] + }) + + if box.schema.user.exists('testuser1') and box.space.customers then + box.schema.user.revoke('testuser1', 'read,write', 'space', 'customers', {if_exists = true}) + end + box.schema.user.drop('testuser1', {if_exists = true}) + + box.schema.user.create('testuser1', { password = 'secret' }) + if _TARANTOOL >= '3.0.0' then + box.schema.user.grant('testuser1', 'execute', 'lua_call', 'box.session.user') + box.schema.user.grant('testuser1', 'execute', 'lua_call', 'crud.select') + box.schema.user.grant('testuser1', 'execute', 'lua_call', 'crud.get') + box.schema.user.grant('testuser1', 'execute', 'lua_call', 'crud.insert_object') + box.schema.user.grant('testuser1', 'execute', 'lua_call', 'crud.insert_object_many') + box.schema.user.grant('testuser1', 'execute', 'lua_call', 'crud.replace_object') + box.schema.user.grant('testuser1', 'execute', 'lua_call', 'crud.replace_object_many') + box.schema.user.grant('testuser1', 'execute', 'lua_call', 'crud.update') + box.schema.user.grant('testuser1', 'execute', 'lua_call', 'crud.upsert_object') + box.schema.user.grant('testuser1', 'execute', 'lua_call', 'crud.upsert_object_many') + box.schema.user.grant('testuser1', 'execute', 'lua_call', 'crud.delete') + box.schema.user.grant('testuser1', 'execute', 'lua_call', 'crud.truncate') + box.schema.user.grant('testuser1', 'execute', 'lua_call', 'crud.len') + box.schema.user.grant('testuser1', 'execute', 'lua_call', 'crud.count') + box.schema.user.grant('testuser1', 'execute', 'lua_call', 'crud.min') + box.schema.user.grant('testuser1', 'execute', 'lua_call', 'crud.max') + + box.schema.user.grant('testuser1', 'execute', 'function', 'read_view_select') + box.schema.user.grant('testuser1', 'execute', 'function', 'read_view_pairs') + else + box.schema.user.grant('testuser1', 'execute', 'universe') + end + + if access_operation_type and box.space.customers then + box.schema.user.grant('testuser1', access_operation_type, 'space', 'customers') + end + + box.session.su(user) + end + end, {g.params.space_access_granted and access_operation_type}) + + local conn = net_box.connect( + g.router.net_box_uri, + { + user = "testuser1", + password = "secret" + } + ) + t.assert_not_equals(conn, nil) + t.assert_equals(conn:is_connected(), true, conn.error) + + return conn +end + +local function tomap(tuple) + return { + id = tuple[1], + bucket_id = nil, + name = tuple[3], + last_name = tuple[4], + age = 
tuple[5],
+        city = tuple[6],
+    }
+end
+
+group_of_tests.test_read_view_select = function(g)
+    local conn = privilegies_test_base_init(g, "read")
+
+    local ok, res, err = pcall(conn.call, conn, "read_view_select")
+    t.assert_equals(ok, true, tostring(res))
+
+    if g.params.space_access_granted then
+        t.assert_equals(err, nil, err)
+        t.assert_equals(#res.rows, #ORIGINAL_ROWS)
+    else
+        t.assert_not_equals(err, nil)
+        t.assert_equals(type(err), "table")
+        t.assert_str_contains(err.str, "ReadviewError: Space \"customers\" doesn't exist")
+
+        t.assert_equals(res, nil)
+    end
+end
+
+group_of_tests.test_read_view_pairs = function(g)
+    local conn = privilegies_test_base_init(g, "read")
+
+    local ok, res, err = pcall(conn.call, conn, "read_view_pairs")
+    if g.params.space_access_granted then
+        t.assert_equals(ok, true, tostring(res))
+        t.assert_equals(err, nil, err)
+        t.assert_equals(#res, 7)
+    else
+        t.assert_equals(ok, false)
+        t.assert_str_contains(tostring(res), "ReadviewError: Space \"customers\" doesn't exist")
+    end
+end
+
+group_of_tests.test_select = function(g)
+    local conn = privilegies_test_base_init(g, "read")
+
+    local ok, res, err = pcall(conn.call, conn, "crud.select", {"customers"})
+    t.assert_equals(ok, true, tostring(res))
+
+    if g.params.space_access_granted then
+        t.assert_equals(err, nil, err)
+        t.assert_equals(#res.rows, #ORIGINAL_ROWS)
+    else
+        t.assert_not_equals(err, nil)
+        t.assert_equals(type(err), "table")
+        t.assert_str_contains(err.str, "Space '#514' does not exist")
+        --TODO: After TNTP-2295 is fixed, use this check instead of the previous one
+        --t.assert_str_contains(err.str, "Read access to space 'customers' is denied for user 'testuser1'")
+
+        t.assert_equals(res, nil)
+    end
+end
+
+group_of_tests.test_insert = function(g)
+    local reference_record = {
+        id = 17,
+        name = "Ivan",
+        last_name = "Ivanovitch",
+        age = 42,
+        city = "Barnaul",
+    }
+
+    local conn = privilegies_test_base_init(g, "write")
+
+    local ok, res, err = pcall(conn.call, conn, "crud.insert_object", {"customers", reference_record})
+    t.assert_equals(ok, true, tostring(res))
+
+    local actual_rows, err2 = g.router:call("crud.get", {"customers", 17})
+    t.assert_equals(err2, nil, err2)
+
+    if g.params.space_access_granted then
+        t.assert_equals(err, nil, err)
+        t.assert_equals(#actual_rows.rows, 1)
+
+        local actual_row = tomap(actual_rows.rows[1])
+        t.assert_equals(actual_row, reference_record)
+    else
+        t.assert_not_equals(err, nil)
+        t.assert_equals(type(err), "table")
+        t.assert_str_contains(err.str, "Write access to space 'customers' is denied for user 'testuser1'")
+
+        t.assert_equals(#actual_rows.rows, 0)
+    end
+end
+
+group_of_tests.test_insert_many = function(g)
+    local reference_record_list = {
+        { id = 17, name = "Анна", last_name = "Иванова", age = 25, city = "Москва", },
+        { id = 18, name = "محمد", last_name = "الزهراني", age = 40, city = "الرياض", },
+        { id = 19, name = "Sophie", last_name = "Lefevre", age = 33, city = "Paris", },
+        { id = 20, name = "Luca", last_name = "Rossi", age = 29, city = "Roma", },
+        { id = 21, name = "Ming", last_name = "Wang", age = 45, city = "北京", },
+        { id = 22, name = "Hiroshi", last_name = "Tanaka", age = 50, city = "東京", },
+        { id = 23, name = "Carlos", last_name = "Fernández", age = 38, city = "Madrid", },
+        { id = 24, name = "Fatima", last_name = "El Amrani", age = 27, city = "Casablanca", },
+        { id = 25, name = "Johannes", last_name = "Schmidt", age = 60, city = "Berlin", },
+        { id = 26, name = "Aarav", last_name = "Patel", age = 35, city = "Mumbai", },
+ { id = 27, name = "Emily", last_name = "Smith", age = 22, city = "London", }, + { id = 28, name = "Mateo", last_name = "Gómez", age = 41, city = "Buenos Aires", }, + { id = 29, name = "Olga", last_name = "Petrova", age = 55, city = "Санкт-Петербург", }, + { id = 30, name = "Johan", last_name = "Andersson", age = 48, city = "Stockholm", }, + { id = 31, name = "Isabella", last_name = "Silva", age = 30, city = "São Paulo", }, + { id = 32, name = "Noah", last_name = "Dubois", age = 26, city = "Montréal", }, + } + + local conn = privilegies_test_base_init(g, "write") + + local ok, res, err = pcall(conn.call, conn, "crud.insert_object_many", {"customers", reference_record_list}) + t.assert_equals(ok, true, tostring(res)) + + local actual_rows_qty, err2 = g.router:call("crud.len", {"customers"}) + t.assert_equals(err2, nil, err2) + + if g.params.space_access_granted then + t.assert_equals(err, nil, err) + t.assert_equals(actual_rows_qty, 32) + else + t.assert_not_equals(err, nil) + t.assert_equals(type(err), "table") + for i = 1, #reference_record_list do + t.assert_str_contains(err[i].err, "Write access to space 'customers' is denied for user 'testuser1'") + end + + t.assert_equals(actual_rows_qty, 16) + end +end + +group_of_tests.test_replace = function(g) + local reference_record = { + id = 1, + name = "Ivan", + last_name = "Ivanovitch", + age = 42, + city = "Barnaul", + } + + local conn = privilegies_test_base_init(g, "write") + + local ok, res, err = pcall(conn.call, conn, "crud.replace_object", {"customers", reference_record}) + t.assert_equals(ok, true, tostring(res)) + + local actual_rows, err2 = g.router:call("crud.get", {"customers", 1}) + t.assert_equals(err2, nil, err2) + t.assert_equals(#actual_rows.rows, 1) + + local actual_row = tomap(actual_rows.rows[1]) + + if g.params.space_access_granted then + t.assert_equals(err, nil, err) + t.assert_equals(actual_row, reference_record) + else + t.assert_not_equals(err, nil) + t.assert_equals(type(err), "table") + t.assert_str_contains(err.str, "Write access to space 'customers' is denied for user 'testuser1'") + + t.assert_equals(actual_row, ORIGINAL_ROWS[1]) + end +end + +group_of_tests.test_replace_many = function(g) + local reference_record_list = { + { id = 1, name = "Анна", last_name = "Иванова", age = 25, city = "Москва", }, + { id = 2, name = "محمد", last_name = "الزهراني", age = 40, city = "الرياض", }, + { id = 3, name = "Sophie", last_name = "Lefevre", age = 33, city = "Paris", }, + { id = 4, name = "Luca", last_name = "Rossi", age = 29, city = "Roma", }, + { id = 5, name = "Ming", last_name = "Wang", age = 45, city = "北京", }, + { id = 6, name = "Hiroshi", last_name = "Tanaka", age = 50, city = "東京", }, + { id = 7, name = "Carlos", last_name = "Fernández", age = 38, city = "Madrid", }, + { id = 8, name = "Fatima", last_name = "El Amrani", age = 27, city = "Casablanca", }, + { id = 9, name = "Johannes", last_name = "Schmidt", age = 60, city = "Berlin", }, + { id = 10, name = "Aarav", last_name = "Patel", age = 35, city = "Mumbai", }, + { id = 11, name = "Emily", last_name = "Smith", age = 22, city = "London", }, + { id = 12, name = "Mateo", last_name = "Gómez", age = 41, city = "Buenos Aires", }, + { id = 13, name = "Olga", last_name = "Petrova", age = 55, city = "Санкт-Петербург", }, + { id = 14, name = "Johan", last_name = "Andersson", age = 48, city = "Stockholm", }, + { id = 15, name = "Isabella", last_name = "Silva", age = 30, city = "São Paulo", }, + { id = 16, name = "Noah", last_name = "Dubois", age = 26, city = "Montréal", 
}, + } + + local conn = privilegies_test_base_init(g, "write") + + local ok, res, err = pcall(conn.call, conn, "crud.replace_object_many", {"customers", reference_record_list}) + t.assert_equals(ok, true, tostring(res)) + + local actual_tuples, err2 = g.router:call("crud.select", {"customers"}) + t.assert_equals(err2, nil, err2) + t.assert_equals(#actual_tuples.rows, 16) + + local actual_rows = {} + for _, tuple in ipairs(actual_tuples.rows) do + table.insert(actual_rows, tomap(tuple)) + end + + table.sort(actual_rows, function(a, b) return a.id < b.id end) + + if g.params.space_access_granted then + t.assert_equals(err, nil, err) + t.assert_equals(actual_rows, reference_record_list) + else + t.assert_not_equals(err, nil) + t.assert_equals(type(err), "table") + for i = 1, #reference_record_list do + t.assert_str_contains(err[i].err, "Write access to space 'customers' is denied for user 'testuser1'") + end + + t.assert_equals(actual_rows, ORIGINAL_ROWS) + end +end + +group_of_tests.test_update = function(g) + local reference_record = { + id = 1, name = "Elizabeth", last_name = "Jackson", + age = 13, city = "New York", + } + + local conn = privilegies_test_base_init(g, "write") + + local ok, res, err = pcall(conn.call, conn, "crud.update", {"customers", 1, {{'+', 'age', 1}}}) + t.assert_equals(ok, true, tostring(res)) + + local actual_rows, err2 = g.router:call("crud.get", {"customers", 1}) + t.assert_equals(err2, nil, err2) + t.assert_equals(#actual_rows.rows, 1) + + local actual_row = tomap(actual_rows.rows[1]) + + if g.params.space_access_granted then + t.assert_equals(err, nil, err) + t.assert_equals(actual_row, reference_record) + else + t.assert_not_equals(err, nil) + t.assert_equals(type(err), "table") + t.assert_str_contains(err.str, "Write access to space 'customers' is denied for user 'testuser1'") + + t.assert_equals(actual_row, ORIGINAL_ROWS[1]) + end +end + +group_of_tests.test_upsert = function(g) + local reference_record = { + id = 1, name = "Elizabeth", last_name = "Jackson", + age = 13, city = "New York", + } + + local conn = privilegies_test_base_init(g, "write") + + local ok, res, err = pcall(conn.call, conn, "crud.upsert_object", { + "customers", + reference_record, + {{'+', 'age', 1}}, + }) + t.assert_equals(ok, true, tostring(res)) + + local actual_rows, err2 = g.router:call("crud.get", {"customers", 1}) + t.assert_equals(err2, nil, err2) + t.assert_equals(#actual_rows.rows, 1) + + local actual_row = tomap(actual_rows.rows[1]) + + if g.params.space_access_granted then + t.assert_equals(err, nil, err) + t.assert_equals(actual_row, reference_record) + else + t.assert_not_equals(err, nil) + t.assert_equals(type(err), "table") + t.assert_str_contains(err.str, "Write access to space 'customers' is denied for user 'testuser1'") + + t.assert_equals(actual_row, ORIGINAL_ROWS[1]) + end +end + +group_of_tests.test_delete = function(g) + local conn = privilegies_test_base_init(g, "write") + + local ok, res, err = pcall(conn.call, conn, "crud.delete", {"customers", 1}) + t.assert_equals(ok, true, tostring(res)) + + local actual_rows, err2 = g.router:call("crud.get", {"customers", 1}) + t.assert_equals(err2, nil, err2) + + if g.params.space_access_granted then + t.assert_equals(err, nil, err) + + res, err = g.router:call("crud.get", {"customers", 1}) + t.assert_equals(err, nil, err) + t.assert_equals(#res.rows, 0) + else + t.assert_not_equals(err, nil) + t.assert_equals(type(err), "table") + t.assert_str_contains(err.str, "Write access to space 'customers' is denied for user 
'testuser1'") + + t.assert_equals(#actual_rows.rows, 1) + local actual_row = tomap(actual_rows.rows[1]) + t.assert_equals(actual_row, ORIGINAL_ROWS[1]) + end +end + +group_of_tests.test_get = function(g) + local conn = privilegies_test_base_init(g, "read") + + local ok, res, err = pcall(conn.call, conn, "crud.get", {"customers", 1}) + t.assert_equals(ok, true, tostring(res)) + + if g.params.space_access_granted then + t.assert_equals(err, nil, err) + t.assert_equals(#res.rows, 1) + + local actual_row1 = tomap(res.rows[1]) + t.assert_equals(actual_row1, ORIGINAL_ROWS[1]) + else + t.assert_not_equals(err, nil) + t.assert_equals(type(err), "table") + t.assert_str_contains(err.str, "Read access to space 'customers' is denied for user 'testuser1'") + end +end + +group_of_tests.test_truncate = function(g) + local conn = privilegies_test_base_init(g, "write") + + local ok, res, err = pcall(conn.call, conn, "crud.truncate", {"customers"}) + t.assert_equals(ok, true, tostring(res)) + + local actual_rows, err2 = g.router:call("crud.select", {"customers"}) + t.assert_equals(err2, nil, err2) + + if g.params.space_access_granted then + t.assert_equals(err, nil, err) + t.assert_equals(#actual_rows.rows, 0) + else + t.assert_not_equals(err, nil) + t.assert_equals(type(err), "table") + t.assert_str_contains(err.str, "Write access to space 'customers' is denied for user 'testuser1'") + + t.assert_equals(#actual_rows.rows, #ORIGINAL_ROWS) + end +end + +group_of_tests.test_upsert_many = function(g) + local reference_rows = { + { id = 1, name = "Elizabeth", last_name = "Jackson", age = 13, city = "New York", }, + { id = 2, name = "Mary", last_name = "Brown", age = 47, city = "Los Angeles", }, + } + + local conn = privilegies_test_base_init(g, "write") + + local ok, res, err = pcall(conn.call, conn, "crud.upsert_object_many", { + "customers", { + {reference_rows[1], {{'+', 'age', 1}}}, + {reference_rows[2], {{'+', 'age', 1}}}, + }}) + t.assert_equals(ok, true, tostring(res)) + + local actual_rows1, err2 = g.router:call("crud.get", {"customers", 1}) + t.assert_equals(err2, nil, err2) + t.assert_equals(#actual_rows1.rows, 1) + + local actual_rows2, err3 = g.router:call("crud.get", {"customers", 2}) + t.assert_equals(err3, nil, err3) + t.assert_equals(#actual_rows2.rows, 1) + + local actual_rows = { + tomap(actual_rows1.rows[1]), + tomap(actual_rows2.rows[1]) + } + + if g.params.space_access_granted then + t.assert_equals(err, nil, err) + t.assert_equals(actual_rows, reference_rows) + else + t.assert_not_equals(err, nil) + t.assert_equals(type(err), "table") + t.assert_str_contains(err[1].str, "Write access to space 'customers' is denied for user 'testuser1'") + t.assert_str_contains(err[2].str, "Write access to space 'customers' is denied for user 'testuser1'") + + t.assert_equals(actual_rows, { ORIGINAL_ROWS[1], ORIGINAL_ROWS[2] }) + end +end + +group_of_tests.test_len = function(g) + local conn = privilegies_test_base_init(g, "read") + + local ok, res, err = pcall(conn.call, conn, "crud.len", {"customers"}) + t.assert_equals(ok, true, tostring(res)) + + if g.params.space_access_granted then + t.assert_equals(err, nil, err) + t.assert_equals(res, #ORIGINAL_ROWS) + else + t.assert_not_equals(err, nil) + t.assert_equals(type(err), "table") + t.assert_str_contains(err.str, "Read access to space 'customers' is denied for user 'testuser1'") + + t.assert_equals(res, nil) + end +end + +group_of_tests.test_count = function(g) + local conn = privilegies_test_base_init(g, "read") + + local ok, res, err = pcall(conn.call, 
conn, "crud.count", {"customers"}) + t.assert_equals(ok, true, tostring(res)) + + if g.params.space_access_granted then + t.assert_equals(err, nil, err) + t.assert_equals(res, #ORIGINAL_ROWS) + else + t.assert_not_equals(err, nil) + t.assert_equals(type(err), "table") + t.assert_str_contains(err.str, "Read access to space 'customers' is denied for user 'testuser1'") + + t.assert_equals(res, nil) + end +end + +group_of_tests.test_min = function(g) + local conn = privilegies_test_base_init(g, "read") + + local ok, res, err = pcall(conn.call, conn, "crud.min", {"customers"}) + t.assert_equals(ok, true, tostring(res)) + + if g.params.space_access_granted then + t.assert_equals(err, nil, err) + + local actual_row = tomap(res.rows[1]) + t.assert_equals(actual_row, ORIGINAL_ROWS[1]) + else + t.assert_not_equals(err, nil) + t.assert_equals(type(err), "table") + t.assert_str_contains(err.str, "Read access to space 'customers' is denied for user 'testuser1'") + + t.assert_equals(res, nil) + end +end + +group_of_tests.test_max = function(g) + local conn = privilegies_test_base_init(g, "read") + + local ok, res, err = pcall(conn.call, conn, "crud.max", {"customers"}) + t.assert_equals(ok, true, tostring(res)) + + if g.params.space_access_granted then + t.assert_equals(err, nil, err) + + local actual_row = tomap(res.rows[1]) + t.assert_equals(actual_row, ORIGINAL_ROWS[16]) + else + t.assert_not_equals(err, nil) + t.assert_equals(type(err), "table") + t.assert_str_contains(err.str, "Read access to space 'customers' is denied for user 'testuser1'") + + t.assert_equals(res, nil) + end +end diff --git a/test/integration/read_scenario.lua b/test/integration/read_scenario.lua index 75a21e0d..1c1469f5 100644 --- a/test/integration/read_scenario.lua +++ b/test/integration/read_scenario.lua @@ -555,6 +555,9 @@ local gh_373_read_with_interval_condition_cases = { ['gh_373_%s_with_interval_single_condition_is_forbidden'] = function(cg, read) helpers.skip_interval_unsupported() + local _, err = cg.router:call("crud.insert", {"interval", {1, nil, datetime.interval.new{}}}) + t.assert_equals(err, nil) + local _, err = read(cg, 'interval', {{'>=', 'interval_field', datetime.interval.new{}}} @@ -567,6 +570,9 @@ local gh_373_read_with_interval_condition_cases = { ['gh_373_%s_with_interval_second_condition_is_forbidden'] = function(cg, read) helpers.skip_interval_unsupported() + local _, err = cg.router:call("crud.insert", {"interval", {1, nil, datetime.interval.new{}}}) + t.assert_equals(err, nil) + local _, err = read(cg, 'interval', {{'>=', 'id', 1}, {'>=', 'interval_field', datetime.interval.new{}}} diff --git a/test/integration/select_readview_test.lua b/test/integration/select_readview_test.lua index a524795c..841b485f 100644 --- a/test/integration/select_readview_test.lua +++ b/test/integration/select_readview_test.lua @@ -39,9 +39,11 @@ pgroup.after_all(function(g) end) pgroup.before_each(function(g) + helpers.reset_call_cache(g.cluster) helpers.truncate_space_on_cluster(g.cluster, 'customers') helpers.truncate_space_on_cluster(g.cluster, 'developers') helpers.truncate_space_on_cluster(g.cluster, 'cars') + helpers.truncate_space_on_cluster(g.cluster, 'interval') end) local function set_master(cluster, uuid, master_uuid) @@ -2562,3 +2564,34 @@ for case_name_template, case in pairs(read_scenario.gh_422_nullability_cases) do case(g, read_impl) end end + +pgroup.test_invalid_bucket_id_in_readview = function(g) + local invalid_opts_list = { + {bucket_id = "string"}, + {bucket_id = {}}, + {bucket_id = true}, + 
{bucket_id = -1}, + } + + for _, opts in ipairs(invalid_opts_list) do + local expected_err = string.format( + "Invalid bucket_id: expected unsigned, got %s", + type(opts.bucket_id) + ) + local _, err = g.router:eval([[ + local crud = require('crud') + + local rv, err = crud.readview({name = 'foo'}) + if err ~= nil then + return nil, err + end + + local _, err = rv:select('customers', nil, ...) + + rv:close() + return nil, err + ]], {opts}) + + t.assert_str_contains(err.err, expected_err) + end +end diff --git a/test/integration/select_test.lua b/test/integration/select_test.lua index d1e189a2..d5ecea52 100644 --- a/test/integration/select_test.lua +++ b/test/integration/select_test.lua @@ -28,10 +28,12 @@ pgroup.after_all(function(g) end) pgroup.before_each(function(g) + helpers.reset_call_cache(g.cluster) helpers.truncate_space_on_cluster(g.cluster, 'customers') helpers.truncate_space_on_cluster(g.cluster, 'developers') helpers.truncate_space_on_cluster(g.cluster, 'cars') helpers.truncate_space_on_cluster(g.cluster, 'logins') + helpers.truncate_space_on_cluster(g.cluster, 'interval') end) @@ -2080,7 +2082,7 @@ pgroup.test_storage_uninit_select_error_text = function(g) t.assert_equals(obj, nil) t.assert_str_contains(err.str, 'SelectError') t.assert_str_contains(err.str, 'NotInitialized') - t.assert_str_contains(err.str, "Function _crud.select_on_storage is not registered") + t.assert_str_contains(err.str, "Function '_crud.select_on_storage' is not registered") t.assert_str_contains(err.str, "crud isn't initialized on replicaset") t.assert_str_contains(err.str, "or crud module versions mismatch between router and storage") end @@ -2110,7 +2112,7 @@ pgroup.test_storage_uninit_get_error_text = function(g) t.assert_equals(obj, nil) t.assert_str_contains(err.str, 'GetError') t.assert_str_contains(err.str, 'NotInitialized') - t.assert_str_contains(err.str, "Function _crud.get_on_storage is not registered") + t.assert_str_contains(err.str, "Function '_crud.get_on_storage' is not registered") t.assert_str_contains(err.str, "crud isn't initialized on replicaset") t.assert_str_contains(err.str, "or crud module versions mismatch between router and storage") end @@ -2323,3 +2325,28 @@ for case_name_template, case in pairs(read_scenario.gh_422_nullability_cases) do case(g, read_impl) end end + +pgroup.test_select_invalid_bucket_id = function(g) + local invalid_opts_list = { + {bucket_id = "str"}, + {bucket_id = -1}, + {bucket_id = {}}, + {bucket_id = true}, + } + + for _, opts in ipairs(invalid_opts_list) do + opts.mode = 'write' + opts.fullscan = true + + local expected_err = string.format( + "Invalid bucket_id: expected unsigned, got %s", + type(opts.bucket_id) + ) + + local resp, err = g.router:call('crud.select', { + 'customers', nil, opts + }) + t.assert_equals(resp, nil) + t.assert_str_contains(err.err or err.str, expected_err) + end +end diff --git a/test/integration/simple_operations_test.lua b/test/integration/simple_operations_test.lua index 79387c1d..b7dffdca 100644 --- a/test/integration/simple_operations_test.lua +++ b/test/integration/simple_operations_test.lua @@ -1704,3 +1704,171 @@ pgroup.test_noreturn_opt = function(g) t.assert_not_equals(err, nil) t.assert_equals(result, nil) end + +pgroup.test_valid_bucket_id_values = function(g) + local valid_cases = { + { + description = 'bucket_id = nil (auto-detection)', + opts = {}, + id = 100, + }, + { + description = 'bucket_id = unsigned number', + opts = {bucket_id = 123}, + id = 101, + }, + } + + for _, case in ipairs(valid_cases) do + 
helpers.truncate_space_on_cluster(g.cluster, 'customers') + + local tuple = {case.id, box.NULL, 'Valid', 30} + local obj = {id = case.id + 1000, name = 'Valid', age = 31} + + local result, err = g.router:call('crud.insert', { + 'customers', tuple, case.opts + }) + t.assert_equals(err, nil, case.description .. ' (tuple)') + t.assert_not_equals(result, nil, case.description .. ' (tuple)') + + result, err = g.router:call('crud.insert_object', { + 'customers', obj, case.opts + }) + t.assert_equals(err, nil, case.description .. ' (object)') + t.assert_not_equals(result, nil, case.description .. ' (object)') + end +end + +pgroup.test_invalid_bucket_id_operations = function(g) + helpers.truncate_space_on_cluster(g.cluster, 'customers') + + local invalid_bucket_id = 'bad-id' + local expected_err = string.format('Invalid bucket_id: expected unsigned, got %s', type(invalid_bucket_id)) + + local key = 1 + local tuple = {key, invalid_bucket_id, 'Test', 42} + local tuple_clean = {key, box.NULL, 'Test', 42} + local object = {id = key + 1000, bucket_id = invalid_bucket_id, name = 'Test', age = 42} + local object_clean = {id = key + 1001, name = 'Test', age = 42} + local operations = {{'=', 'name', 'NewName'}} + + local crud_calls = { + {'crud.insert', {'customers', tuple, {}}}, + {'crud.insert', {'customers', tuple_clean, {bucket_id = invalid_bucket_id}}}, + + {'crud.insert_object', {'customers', object, {}}}, + {'crud.insert_object', {'customers', object_clean, {bucket_id = invalid_bucket_id}}}, + + {'crud.replace', {'customers', tuple, {}}}, + {'crud.replace', {'customers', tuple_clean, {bucket_id = invalid_bucket_id}}}, + + {'crud.replace_object', {'customers', object, {}}}, + {'crud.replace_object', {'customers', object_clean, {bucket_id = invalid_bucket_id}}}, + + {'crud.upsert', {'customers', tuple, operations, {}}}, + {'crud.upsert', {'customers', tuple_clean, operations, {bucket_id = invalid_bucket_id}}}, + + {'crud.upsert_object', {'customers', object, operations, {}}}, + {'crud.upsert_object', {'customers', object_clean, operations, {bucket_id = invalid_bucket_id}}}, + + {'crud.update', {'customers', key, operations, {bucket_id = invalid_bucket_id}}}, + {'crud.delete', {'customers', key, {bucket_id = invalid_bucket_id}}}, + } + + for _, call in ipairs(crud_calls) do + local func, args = call[1], call[2] + local _, err = g.router:call(func, args) + t.assert_str_contains(err.err or err[1].err, expected_err, func) + end +end + +pgroup.test_invalid_bucket_id_many_operations = function(g) + local invalid_bucket_id = 'bad-id' + local expected_err = string.format('Invalid bucket_id: expected unsigned, got %s', type(invalid_bucket_id)) + + local many_calls = { + { + func = 'crud.insert_many', + args = {'customers', { + {1, invalid_bucket_id, 'ManyInsert', 10}, + }}, + }, + { + func = 'crud.replace_many', + args = {'customers', { + {2, invalid_bucket_id, 'ManyReplace', 20}, + }}, + }, + { + func = 'crud.upsert_many', + args = {'customers', { + { + {3, invalid_bucket_id, 'ManyUpsert', 30}, + {{'=', 'name', 'replaced'}} + }, + }}, + }, + } + + for _, call in ipairs(many_calls) do + local _, err = g.router:call(call.func, call.args) + t.assert_str_contains(err.err or err[1].err, expected_err, call.func) + end +end + +pgroup.test_invalid_bucket_id_object_many_operations = function(g) + local invalid_bucket_id = 'bad-id' + local expected_err = string.format('Invalid bucket_id: expected unsigned, got %s', type(invalid_bucket_id)) + + local object_many_calls = { + { + func = 'crud.insert_object_many', + args = 
{'customers', { + {id = 1001, bucket_id = invalid_bucket_id, name = 'ManyObjInsert', age = 20}, + }}, + }, + { + func = 'crud.replace_object_many', + args = {'customers', { + {id = 1002, bucket_id = invalid_bucket_id, name = 'ManyObjReplace', age = 30}, + }}, + }, + { + func = 'crud.upsert_object_many', + args = {'customers', { + { + {id = 1003, bucket_id = invalid_bucket_id, name = 'ManyObjUpsert', age = 40}, + {{'=', 'age', 50}}, + }, + }}, + }, + } + + for _, call in ipairs(object_many_calls) do + local _, err = g.router:call(call.func, call.args) + t.assert_str_contains(err.err or err[1].err, expected_err, call.func) + end +end + +pgroup.test_get_invalid_bucket_id = function(g) + local invalid_values = { + "string", + {}, + true, + -1, + } + + for _, bucket_id in ipairs(invalid_values) do + local expected_err = string.format( + "Invalid bucket_id: expected unsigned, got %s", + type(bucket_id) + ) + local result, err = g.router:call('crud.get', { + 'customers', 1, {bucket_id = bucket_id, mode = 'write'} + }) + + t.assert_equals(result, nil) + t.assert_str_contains(err.err or err.str, expected_err) + end +end + diff --git a/test/integration/stats_test.lua b/test/integration/stats_test.lua index 39b313be..a8f9b082 100644 --- a/test/integration/stats_test.lua +++ b/test/integration/stats_test.lua @@ -62,7 +62,7 @@ local call_cfg = function(g, way, cfg) ]], { cfg }) elseif way == 'role' then if g.params.backend == helpers.backend.CARTRIDGE then - g.router:upload_config{crud = cfg} + g.router:upload_config{["crud"] = cfg} elseif g.params.backend == helpers.backend.CONFIG then local cluster_cfg = g.cluster:cfg() diff --git a/test/integration/storages_state_test.lua b/test/integration/storages_state_test.lua index bcaa7918..7dc8f07b 100644 --- a/test/integration/storages_state_test.lua +++ b/test/integration/storages_state_test.lua @@ -83,33 +83,54 @@ local function build_storage_info(g, array_info) return res end +local function ordered_keys_for_results(g) + local is_vshard = g.params.backend == 'vshard' + local is_config = g.params.backend == 'config' + + local name_as_key = is_vshard and ( + type(g.params.backend_cfg) == 'table' + and g.params.backend_cfg.identification_mode == 'name_as_key' + ) or is_config + + if name_as_key then + return { + 's1-master', + 's1-replica', + 's2-master', + 's2-replica', + } + end + + return { + helpers.uuid('b', 1), + helpers.uuid('b', 10), + helpers.uuid('c', 1), + helpers.uuid('c', 10), + } +end + pgroup.test_crud_storage_status_of_stopped_servers = function(g) g.cluster:server("s2-replica"):stop() local results, err = g.router:call("crud.storage_info", {}) t.assert_equals(err, nil, "Error getting storages info") - t.assert_equals(results, build_storage_info(g, { - { - status = "running", - is_master = true - }, - { - status = "running", - is_master = false - }, - { - status = "running", - is_master = true - }, - { - status = "error", - is_master = false, - message = "Peer closed" - }, - })) -end + local keys = ordered_keys_for_results(g) + + local expected = { + [keys[1]] = { status = "running", is_master = true }, + [keys[2]] = { status = "running", is_master = false }, + [keys[3]] = { status = "running", is_master = true }, + [keys[4]] = { status = "error", is_master = false }, + } + for _, k in ipairs(keys) do + local got = results[k] + t.assert_not_equals(got, nil, ("No result for key %s"):format(k)) + t.assert_equals(got.status, expected[k].status, ("status mismatch for %s"):format(k)) + t.assert_equals(got.is_master, expected[k].is_master, 
("is_master mismatch for %s"):format(k)) + end +end pgroup.after_test('test_crud_storage_status_of_stopped_servers', function(g) helpers.stop_cluster(g.cluster, g.params.backend) g.cluster = nil diff --git a/test/integration/vshard_custom_test.lua b/test/integration/vshard_custom_test.lua index cfcb22df..8677037d 100644 --- a/test/integration/vshard_custom_test.lua +++ b/test/integration/vshard_custom_test.lua @@ -217,10 +217,12 @@ pgroup.before_all(function(g) end) pgroup.before_each(function(g) + helpers.reset_call_cache(g.cluster) helpers.truncate_space_on_cluster(g.cluster, 'customers') helpers.truncate_space_on_cluster(g.cluster, 'customers_ddl') helpers.truncate_space_on_cluster(g.cluster, 'locations') helpers.truncate_space_on_cluster(g.cluster, 'locations_ddl') + helpers.truncate_space_on_cluster(g.cluster, 'interval') g.router:call('prepare_data', {'customers', 'customers', {1, box.NULL, 'Akiyama Shun', 32}}) g.router:call('prepare_data', {'customers', 'customers', {2, box.NULL, 'Kazuma Kiryu', 41}}) diff --git a/test/luacov-merger.lua b/test/luacov-merger.lua new file mode 100644 index 00000000..6c80f716 --- /dev/null +++ b/test/luacov-merger.lua @@ -0,0 +1,71 @@ +-- Utility merges luacov coverage statistic files by coverage percentage into a single result +-- USAGE: ... + +local function read_file(filename) + local file = io.open(filename, "r") + if not file then + error("Failed to open file: " .. filename) + end + + local data = {} + for line in file:lines() do + table.insert(data, line) + end + + file:close() + return data +end + +local function merge_coverage_data(files) + local coverage_data = {} + + for _, filename in ipairs(files) do + local data = read_file(filename) + + for i = 1, #data, 2 do + local header = data[i] + local counts = data[i + 1] + + local file_path = header:match(":(.+)") + local line_counts = {} + + for count in counts:gmatch("%d+") do + table.insert(line_counts, tonumber(count)) + end + + if not coverage_data[file_path] then + coverage_data[file_path] = line_counts + else + for j = 1, #line_counts do + coverage_data[file_path][j] = (coverage_data[file_path][j] or 0) + line_counts[j] + end + end + end + end + + return coverage_data +end + +local function write_merged_data_to_file(coverage_data, output_filename) + local file = io.open(output_filename, "w") + if not file then + error("Failed to open file for writing: " .. output_filename) + end + + for file_path, counts in pairs(coverage_data) do + file:write(#counts .. ":" .. file_path .. "\n") + file:write(table.concat(counts, " ") .. 
"\n") + end + + file:close() +end + +local files_list = table.deepcopy(arg) +files_list[-1] = nil +files_list[0] = nil +local output_filename = files_list[1] +table.remove(files_list, 1) + +local merged_data = merge_coverage_data(files_list) +write_merged_data_to_file(merged_data, output_filename) +print("Luacovs merge: Done") diff --git a/test/tarantool3_helpers/cluster_test.lua b/test/tarantool3_helpers/cluster_test.lua index a27e8f0b..34a3e550 100644 --- a/test/tarantool3_helpers/cluster_test.lua +++ b/test/tarantool3_helpers/cluster_test.lua @@ -7,6 +7,7 @@ local g = t.group() g.before_all(function(cg) helpers.skip_if_tarantool3_crud_roles_unsupported() + helpers.skip_tarantool3_vshard_unsupported() local config = { credentials = { diff --git a/test/unit/call_test.lua b/test/unit/call_test.lua index e7e910b6..f4e21926 100644 --- a/test/unit/call_test.lua +++ b/test/unit/call_test.lua @@ -34,7 +34,7 @@ pgroup.test_map_non_existent_func = function(g) t.assert_equals(results, nil) helpers.assert_str_contains_pattern_with_replicaset_id(err.err, "Failed for [replicaset_id]") - t.assert_str_contains(err.err, "Function non_existent_func is not registered") + t.assert_str_contains(err.err, "Function 'non_existent_func' is not registered") end pgroup.test_single_non_existent_func = function(g) @@ -47,7 +47,7 @@ pgroup.test_single_non_existent_func = function(g) t.assert_equals(results, nil) helpers.assert_str_contains_pattern_with_replicaset_id(err.err, "Failed for [replicaset_id]") - t.assert_str_contains(err.err, "Function non_existent_func is not registered") + t.assert_str_contains(err.err, "Function 'non_existent_func' is not registered") end pgroup.test_map_invalid_mode = function(g) diff --git a/test/unit/get_self_vshard_replicaset_test.lua b/test/unit/get_self_vshard_replicaset_test.lua new file mode 100644 index 00000000..f5ff6a7e --- /dev/null +++ b/test/unit/get_self_vshard_replicaset_test.lua @@ -0,0 +1,402 @@ +-- test for https://github.com/tarantool/crud-ee/issues/16 + +local t = require('luatest') +local vshard_utils = require('crud.common.vshard_utils') + +local g = t.group('get_self_vshard_replicaset') + +g.before_each(function(cg) + cg.__get_storage_info = vshard_utils.__get_storage_info + cg.__get_box_info = vshard_utils.__get_box_info +end) + +g.after_each(function(cg) + vshard_utils.__get_storage_info = cg.__get_storage_info + vshard_utils.__get_box_info = cg.__get_box_info +end) + +local storage_info_with_instances_names = { + replicasets = { + ["storage-1"] = { + master = "auto", + uuid = "ae2de070-8769-4ffa-9942-8141ce0b78cc", + name = "storage-1" + }, + ["storage-2"] = { + master = "auto", + uuid = "ef85f92c-9ad7-4bb3-b27a-8f1edb440ce3", + name = "storage-2" + } + }, + bucket = { + receiving = 0, + active = 0, + total = 0, + garbage = 0, + pinned = 0, + sending = 0 + }, + uri = "admin@localhost:3303", + identification_mode = "name_as_key", + status = 0, + replication = { + status = "master" + }, + alerts = {}, +} + +local box_info_with_instances_names = { + version = "3.1.0-0-g663f509a2", + id = 2, + ro = true, + uuid = "2cbd467d-4026-4f85-968d-622dea28fe5a", + pid = 1, + replicaset = { + uuid = "764f6e67-17f7-4deb-a3f0-784436b0327d", + name = "storage-1" + }, + schema_version = 92, + listen = "172.21.0.14:3301", + replication_anon = { + count = 0 + }, + replication = { + { + id = 1, + uuid = "a56e901d-841f-46b8-99cf-0943fcc960b9", + lsn = 30149, + upstream = { + status = "follow", + idle = 0.047537165999984, + peer = "replicator@tarantool-storage-1-msk:3301", + lag = 
0.00072526931762695, + name = "storage-1-msk" + }, + downstream = { + status = "follow", + idle = 0.54811758300002, + vclock = { [1] = 30149 }, + lag = 0 + } + }, + { + id = 2, + uuid = "2cbd467d-4026-4f85-968d-622dea28fe5a", + lsn = 0, + name = "storage-1-spb" + }, + { + id = 3, + uuid = "b0b1037f-95e9-491b-b37c-298bed9286e9", + lsn = 0, + upstream = { + status = "follow", + idle = 0.97080358300002, + peer = "replicator@tarantool-storage-1-brn:3301", + lag = 0.00027346611022949, + name = "storage-1-brn" + }, + downstream = { + status = "follow", + idle = 0.94219958300005, + vclock = { [1] = 30149 }, + lag = 0 + }, + }, + }, + hostname = "10848594b67d", + election = { + state = "follower", + vote = 0, + leader = 0, + term = 1, + signature = 30149, + }, + synchro = { + queue = { + owner = 0, + term = 0, + len = 0, + busy = false + }, + quorum = 2, + status = "running" + }, + sql = {}, + vclock = { [1] = 30149 }, + uptime = 547, + lsn = 0, + vinyl = {}, + ro_reason = "config", + memory = {}, + gc = {}, + cluster = { + name = "storage-1-spb" + }, + package = "Tarantool Enterprise" +} + +local box_info_with_instances_uuids = { + version = "2.11.3-0-ge45691111", + id = 1, + ro = true, + uuid = "ac9fcc18-e7e2-471b-bf46-97680a3615ad", + pid = 5312, + cluster = { + uuid = "ae2de070-8769-4ffa-9942-8141ce0b78cc" + }, + schema_version = 114, + listen = "127.0.0.1:3303", + replication_anon = { + count = 0 + }, + replication = { + { + id = 1, + uuid = "ac9fcc18-e7e2-471b-bf46-97680a3615ad", + lsn = 30141 + }, + { + id = 2, + uuid = "14de6d0b-78ae-43c6-9f75-e41e15b72ff0", + lsn = 0, + upstream = { + status = "follow", + idle = 0.52450900012627, + peer = "admin@localhost:3304", + lag = 9.8943710327148e-05 + }, + downstream = { + status = "follow", + idle = 0.49833300011232, + vclock = { [1] = 30141 }, + lag = 0 + } + } + }, + election = { + state = "follower", + vote = 0, + leader = 0, + term = 1, + signature = 30141 + }, + synchro = { + queue = { + owner = 0, + term = 0, + len = 0, + busy = false + }, + quorum = 2, + status = "running" + }, + vclock = { [1] = 30141 }, + uptime = 272, + lsn = 30141, + ro_reason = "config", + sql = {}, + gc = {}, + vinyl = {}, + memory = {}, + package = "Tarantool Enterprise" +} + + +local storage_info_with_instances_uuids = { + replicasets = { + ["ef85f92c-9ad7-4bb3-b27a-8f1edb440ce3"] = { + master = nil, + uri = "admin@localhost:3306", + uuid = "ef85f92c-9ad7-4bb3-b27a-8f1edb440ce3" + }, + ["ae2de070-8769-4ffa-9942-8141ce0b78cc"] = { + master = nil, + uri = "admin@localhost:3304", + uuid = "ae2de070-8769-4ffa-9942-8141ce0b78cc" + } + }, + bucket = { + receiving = 0, + active = 15000, + total = 15000, + garbage = 0, + pinned = 0, + sending = 0 + }, + uri = "admin@localhost:3303", + identification_mode = "uuid_as_key", + status = 0, + replication = { + status = "follow", + lag = 0.00037693977355957 + }, + alerts = {} +} + +local storage_info_with_instances_names_on_2_11_upgrage = { + replicasets = { + ["storage-1"] = { + master = "auto", + uuid = "ae2de070-8769-4ffa-9942-8141ce0b78cc", + name = "storage-1" + }, + ["storage-2"] = { + master = "auto", + uuid = "ef85f92c-9ad7-4bb3-b27a-8f1edb440ce3", + name = "storage-2" + } + }, + bucket = { + receiving = 0, + active = 15000, + total = 15000, + garbage = 0, + pinned = 0, + sending = 0 + }, + uri = "admin@localhost:3303", + identification_mode = "name_as_key", + status = 2, + replication = { + status = "master" + }, + alerts = { + {"UNREACHABLE_REPLICA", "Replica cdata: NULL isn't active"}, + {"LOW_REDUNDANCY", "Only one 
replica is active"} + } +} + +local box_info_with_instances_names_on_2_11_upgrage = { + version = "3.1.0-0-g663f509a2", + id = 1, + ro = false, + uuid = "ac9fcc18-e7e2-471b-bf46-97680a3615ad", + pid = 23500, + cluster = { + name = box.NULL + }, + schema_version = 114, + listen = "[::1]:3303", + replication_anon = { + count = 0 + }, + replication = { + { + id = 1, + uuid = "ac9fcc18-e7e2-471b-bf46-97680a3615ad", + lsn = 30141, + name = box.NULL + }, + { + id = 2, + uuid = "14de6d0b-78ae-43c6-9f75-e41e15b72ff0", + lsn = 0, + upstream = { + status = "follow", + idle = 0.017113999929279, + peer = "admin@localhost:3304", + lag = 0.00025582313537598 + }, + name = box.NULL, + downstream = { + status = "follow", + idle = 0.042480000061914, + vclock = { [1] = 30141 }, + lag = 0 + } + } + }, + election = { + state = "follower", + vote = 0, + leader = 0, + term = 1 + }, + signature = 30141, + synchro = { + queue = { + owner = 0, + term = 0, + len = 0, + busy = false + }, + quorum = 2 + }, + status = "running", + hostname = "hostname", + vclock = { [1] = 30141 }, + uptime = 2398, + lsn = 30141, + sql = {}, + vinyl = {}, + memory = {}, + gc = {}, + replicaset = { + uuid = "ae2de070-8769-4ffa-9942-8141ce0b78cc", + name = box.NULL + }, + name = box.NULL, + package = "Tarantool Enterprise" +} + +g.test_use_names = function() + -- happens when tarantool 3.1 starts on 3.1 data + vshard_utils.__get_box_info = function() + return box_info_with_instances_names + end + vshard_utils.__get_storage_info = function() + return true, storage_info_with_instances_names + end + vshard_utils.is_schema_needs_upgrade_from_2_11 = function() + return false + end + vshard_utils.get_vshard_identification_mode = function() + return "name_as_key" + end + local name, respicaset = vshard_utils.get_self_vshard_replicaset() + t.assert_equals(name, "storage-1") + t.assert_equals(respicaset, {master = "auto", name = "storage-1", uuid = "ae2de070-8769-4ffa-9942-8141ce0b78cc"}) +end + +g.test_before_2_11_upgrage = function() + -- happens when tarantool 3.1 starts on 2.11 data + vshard_utils.__get_box_info = function() + return box_info_with_instances_names_on_2_11_upgrage + end + vshard_utils.__get_storage_info = function() + return true, storage_info_with_instances_names_on_2_11_upgrage + end + vshard_utils.is_schema_needs_upgrade_from_2_11 = function() + return true + end + vshard_utils.get_vshard_identification_mode = function() + return 'name_as_key' + end + local uuid, respicaset = vshard_utils.get_self_vshard_replicaset() + t.assert_equals(uuid, "ae2de070-8769-4ffa-9942-8141ce0b78cc") + t.assert_equals(respicaset, { + master = "auto", + name = "storage-1", + uuid = "ae2de070-8769-4ffa-9942-8141ce0b78cc", + }) +end + +g.test_use_uuid = function() + -- happens when tarantool 2.11 starts on 2.11 data + vshard_utils.__get_box_info = function() + return box_info_with_instances_uuids + end + vshard_utils.__get_storage_info = function() + return true, storage_info_with_instances_uuids + end + vshard_utils.is_schema_needs_upgrade_from_2_11 = function() + return false + end + vshard_utils.get_vshard_identification_mode = function() + return 'uuid_as_key' + end + local uuid, respicaset = vshard_utils.get_self_vshard_replicaset() + t.assert_equals(uuid, "ae2de070-8769-4ffa-9942-8141ce0b78cc") + t.assert_equals(respicaset, {uri = "admin@localhost:3304", uuid = "ae2de070-8769-4ffa-9942-8141ce0b78cc"}) +end diff --git a/test/unit/privileges_test.lua b/test/unit/privileges_test.lua new file mode 100644 index 00000000..51cec2c5 --- /dev/null +++ 
b/test/unit/privileges_test.lua @@ -0,0 +1,40 @@ +local t = require("luatest") +local g = t.group() + +local helper = require("test.helper") +local call = require("crud.common.call") + +g.before_all(function() + helper.box_cfg({listen = 3401}) + + box.schema.user.create("unittestuser", {password = "secret", if_not_exists = true}) + box.schema.user.grant("unittestuser", "read,write,execute,create,alter,drop", "universe", nil, + {if_not_exists = true}) + + rawset(_G, "unittestfunc", function(...) + return ... + end) +end) + +g.test_prepend_current_user_smoke = function() + local res = call.storage_api.call_on_storage(box.session.effective_user(), "unittestfunc", {"too", "foo"}) + t.assert_equals(res, {"too", "foo"}) +end + +g.test_non_existent_user = function() + t.assert_error_msg_contains("User 'non_existent_user' is not found", + call.storage_api.call_on_storage, "non_existent_user", "unittestfunc") +end + +g.test_that_the_session_switches_back = function() + rawset(_G, "unittestfunc2", function() + return box.session.effective_user() + end) + + local reference_user = box.session.effective_user() + t.assert_not_equals(reference_user, "unittestuser") + + local res = call.storage_api.call_on_storage("unittestuser", "unittestfunc2") + t.assert_equals(res, "unittestuser") + t.assert_equals(box.session.effective_user(), reference_user) +end diff --git a/test/unit/utils_append_array_test.lua b/test/unit/utils_append_array_test.lua new file mode 100644 index 00000000..401d7a1a --- /dev/null +++ b/test/unit/utils_append_array_test.lua @@ -0,0 +1,14 @@ +local t = require("luatest") +local g = t.group() + +local utils = require("crud.common.utils") + +g.test_append_void = function() + local res = utils.append_array({"too, foo"}) + t.assert_equals(res, {"too, foo"}) +end + +g.test_concat = function() + local res = utils.append_array({"too, foo"}, {"bar, baz, buzz"}) + t.assert_equals(res, {"too, foo", "bar, baz, buzz"}) +end diff --git a/test/unit/utils_test.lua b/test/unit/utils_test.lua index 93ca0774..6391f04b 100644 --- a/test/unit/utils_test.lua +++ b/test/unit/utils_test.lua @@ -1,4 +1,5 @@ local fun = require('fun') +local ffi = require('ffi') local t = require('luatest') local g = t.group('utils') @@ -355,3 +356,25 @@ for name, case in pairs(is_version_in_range_cases) do t.assert_equals(utils.is_version_in_range(unpack_N(case.args, 15)), case.res) end end + +local is_uint_cases = { + positive_number = {value = 1, expected = true}, + zero = {value = 0, expected = true}, + negative_number = {value = -1,expected = false}, + non_integer_number = {value = 123.45, expected = false}, + string_value = {value = '123', expected = false}, + table_value = {value = {}, expected = false}, + boolean_value = {value = true, expected = false}, + nil_value = {value = nil, expected = false}, + box_null = {value = box.NULL, expected = false}, + ffi_uint64 = {value = ffi.new('uint64_t', 1), expected = true}, + ffi_uint64_zero = {value = ffi.new('uint64_t', 0), expected = true}, + ffi_int64_negative = {value = ffi.new('int64_t', -1), expected = false}, +} + +for name, case in pairs(is_uint_cases) do + g["test_is_uint_" .. 
name] = function() + local actual = utils.is_uint(case.value) + t.assert_equals(actual, case.expected) + end +end diff --git a/test/unit/validate_bucket_id_test.lua b/test/unit/validate_bucket_id_test.lua new file mode 100644 index 00000000..8f3f35b2 --- /dev/null +++ b/test/unit/validate_bucket_id_test.lua @@ -0,0 +1,36 @@ +local t = require('luatest') +local g = t.group() + +local sharding = require('crud.common.sharding') + +local ffi = require('ffi') + +local cases = { + positive_number = {value = 1, should_fail = false}, + large_number = {value = 100000, should_fail = false}, + zero = {value = 0, should_fail = true}, + negative_number = {value = -1, should_fail = true}, + non_integer_number = {value = 123.45, should_fail = true}, + string_value = {value = 'abc', should_fail = true}, + boolean_value = {value = true, should_fail = true}, + table_value = {value = {}, should_fail = true}, + nil_value = {value = nil, should_fail = true}, + box_null = {value = box.NULL, should_fail = true}, + ffi_uint64 = {value = ffi.new('uint64_t', 1), should_fail = false}, + ffi_uint64_zero = {value = ffi.new('uint64_t', 0), should_fail = true}, + ffi_int64_negative = {value = ffi.new('int64_t', -1), should_fail = true}, +} + +for name, case in pairs(cases) do + g["test_validate_bucket_id_" .. name] = function() + local err = sharding.validate_bucket_id(case.value) + + if case.should_fail then + t.assert(err, ('%s should be rejected'):format(name)) + t.assert_equals(err.class_name, 'BucketIDError') + t.assert_str_contains(err.err, 'expected unsigned') + else + t.assert_equals(err, nil, ('%s should be accepted'):format(name)) + end + end +end diff --git a/test/vshard_helpers/vtest.lua b/test/vshard_helpers/vtest.lua index be687b69..c4448515 100644 --- a/test/vshard_helpers/vtest.lua +++ b/test/vshard_helpers/vtest.lua @@ -525,7 +525,7 @@ local function cluster_new(g, cfg) -- Logged in as guest with 'super' access rights. Yet 'super' is not -- enough to grant 'replication' privilege. The simplest way - login -- as admin for that temporary. - local user = box.session.user() + local user = box.session.effective_user() box.session.su('admin') cfg.engine = nil