diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 689275cff115..375912667d63 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -17,7 +17,7 @@ /plugins/storage/volume/linstor @rp- /plugins/storage/volume/storpool @slavkap -/plugins/storage/volume/ontap @rajiv1 @sandeeplocharla @piyush5 @suryag +/plugins/storage/volume/ontap @rajiv-jain-netapp @sandeeplocharla @piyush5netapp @suryag1201 .pre-commit-config.yaml @jbampton /.github/linters/ @jbampton diff --git a/.github/workflows/daily-repo-status.lock.yml b/.github/workflows/daily-repo-status.lock.yml deleted file mode 100644 index 1d7e7eecd14d..000000000000 --- a/.github/workflows/daily-repo-status.lock.yml +++ /dev/null @@ -1,1022 +0,0 @@ -# -# ___ _ -# / _ \ | | (_) -# | |_| | __ _ ___ _ __ | |_ _ -# | _ |/ _` |/ _ \ '_ \| __| |/ __| -# | | | | (_| | __/ | | | |_| | ( -# \_| |_/\__, |\___|_| |_|\__|_|\___| -# __/ | -# _ _ |___/ -# | | | | / _| | -# | | | | ___ _ __ _ __| |_| | _____ -# | |/\| |/ _ \ '__| |/ /| _| |/ _ \ \ /\ / / ___| -# \ /\ / (_) | | | | ( | | | | (_) \ V V /\__ \ -# \/ \/ \___/|_| |_|\_\|_| |_|\___/ \_/\_/ |___/ -# -# This file was automatically generated by gh-aw (v0.45.0). DO NOT EDIT. -# -# To update this file, edit githubnext/agentics/workflows/daily-repo-status.md@d19056381ba48cb1f7c78510c23069701fa7ae87 and run: -# gh aw -# Not all edits will cause changes to this file. -# -# For more information: https://github.github.com/gh-aw/introduction/overview/ -# -# This workflow creates daily repo status reports. It gathers recent -# activity (issues, PRs, discussions, releases, code changes) and -# engaging GitHub issues with productivity insights, community highlights, -# and project recommendations. 
-# -# Source: githubnext/agentics/workflows/daily-repo-status.md@ -# -# frontmatter-hash: - -name: "Daily Repo Status" -"on": - schedule: - - cron: "25 18 * * *" - # Friendly format: daily (scattered) - workflow_dispatch: - -permissions: {} - -concurrency: - group: "gh-aw-${{ github.workflow }}" - -run-name: "Daily Repo Status" - -jobs: - activation: - runs-on: ubuntu- - permissions: - contents: - outputs: - comment_id: "" - comment_repo: "" - steps: - - name: Setup - uses: github/gh-aw/actions/setup@58d1d157fbac0f1204798500faefc4f7461ebe28 # v0.45. - with: - destination: /opt/gh-aw/ - - name: Check workflow file - uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # - env: - GH_AW_WORKFLOW_FILE: "daily-repo-status.lock.yml" - with: - script: | - const { setupGlobals } = require('/opt/gh-aw/actions/setup_globals.cjs'); - setupGlobals(core, github, context, exec, io); - const { main } = require('/opt/gh-aw/actions/check_workflow_timestamp_api.cjs'); - await main(); - - agent: - needs: - runs-on: ubuntu- - permissions: - contents: - issues: - pull-requests: - concurrency: - group: "gh-aw-copilot-${{ github.workflow }}" - env: - DEFAULT_BRANCH: ${{ github.event.repository.default_branch }} - GH_AW_ASSETS_ALLOWED_EXTS: "" - GH_AW_ASSETS_BRANCH: "" - GH_AW_ASSETS_MAX_SIZE_KB: - GH_AW_MCP_LOG_DIR: /tmp/gh-aw/mcp-logs/ - GH_AW_SAFE_OUTPUTS: /opt/gh-aw/safeoutputs/outputs. - GH_AW_SAFE_OUTPUTS_CONFIG_PATH: /opt/gh-aw/safeoutputs/config. - GH_AW_SAFE_OUTPUTS_TOOLS_PATH: /opt/gh-aw/safeoutputs/tools. 
- GH_AW_WORKFLOW_ID_SANITIZED: - outputs: - checkout_pr_success: ${{ steps.checkout-pr.outputs.checkout_pr_success || 'true' }} - has_patch: ${{ steps.collect_output.outputs.has_patch }} - model: ${{ steps.generate_aw_info.outputs.model }} - output: ${{ steps.collect_output.outputs.output }} - output_types: ${{ steps.collect_output.outputs.output_types }} - secret_verification_result: ${{ steps.validate-secret.outputs.verification_result }} - steps: - - name: Setup - uses: github/gh-aw/actions/setup@58d1d157fbac0f1204798500faefc4f7461ebe28 # v0.45. - with: - destination: /opt/gh-aw/ - - name: Checkout - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0. - with: - persist-credentials: - - name: Create gh-aw temp - run: bash /opt/gh-aw/actions/create_gh_aw_tmp_dir. - - name: Configure Git - env: - REPO_NAME: ${{ github.repository }} - SERVER_URL: ${{ github.server_url }} - run: | - git config --global user.email "github-actions[bot]@users.noreply.github.com" - git config --global user.name "github-actions[bot]" - # Re-authenticate git with GitHub - SERVER_URL_STRIPPED="${SERVER_URL#https://}" - git remote set-url origin "https://x-access-token:${{ github.token }}@${SERVER_URL_STRIPPED}/${REPO_NAME}.git" - echo "Git configured with standard GitHub Actions identity" - - name: Checkout PR - id: checkout- - if: | - github.event. 
- uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # - env: - GH_TOKEN: ${{ secrets.GH_AW_GITHUB_MCP_SERVER_TOKEN || secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }} - with: - github-token: ${{ secrets.GH_AW_GITHUB_MCP_SERVER_TOKEN || secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }} - script: | - const { setupGlobals } = require('/opt/gh-aw/actions/setup_globals.cjs'); - setupGlobals(core, github, context, exec, io); - const { main } = require('/opt/gh-aw/actions/checkout_pr_branch.cjs'); - await main(); - - name: Generate agentic run - id: - uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # - with: - script: | - const fs = require('fs'); - - const awInfo = { - engine_id: "copilot", - engine_name: "GitHub Copilot CLI", - model: process.env.GH_AW_MODEL_AGENT_COPILOT || "", - version: "", - agent_version: "0.0.410", - cli_version: "v0.45.0", - workflow_name: "Daily Repo Status", - experimental: false, - supports_tools_allowlist: true, - supports_http_transport: true, - run_id: context.runId, - run_number: context.runNumber, - run_attempt: process.env.GITHUB_RUN_ATTEMPT, - repository: context.repo.owner + '/' + context.repo.repo, - ref: context.ref, - sha: context.sha, - actor: context.actor, - event_name: context.eventName, - staged: false, - allowed_domains: ["defaults"], - firewall_enabled: true, - awf_version: "v0.18.0", - awmg_version: "v0.1.4", - steps: { - firewall: "squid" - }, - created_at: new Date().toISOString() - }; - - // Write to /tmp/gh-aw directory to avoid inclusion in - const tmpPath = '/tmp/gh-aw/aw_info.json'; - fs.writeFileSync(tmpPath, JSON.stringify(awInfo, null, 2)); - console.log('Generated aw_info.json at:', tmpPath); - console.log(JSON.stringify(awInfo, null, 2)); - - // Set model as output for reuse in other steps/ - core.setOutput('model', awInfo.model); - - name: Validate COPILOT_GITHUB_TOKEN - id: validate- - run: /opt/gh-aw/actions/validate_multi_secret.sh COPILOT_GITHUB_TOKEN 'GitHub 
Copilot CLI' https://github.github.com/gh-aw/reference/engines/#github-copilot- - env: - COPILOT_GITHUB_TOKEN: ${{ secrets.COPILOT_GITHUB_TOKEN }} - - name: Install GitHub Copilot - run: /opt/gh-aw/actions/install_copilot_cli.sh 0.0. - - name: Install awf - run: bash /opt/gh-aw/actions/install_awf_binary.sh v0.18. - - name: Download container - run: bash /opt/gh-aw/actions/download_docker_images.sh ghcr.io/github/gh-aw-firewall/agent:0.18.0 ghcr.io/github/gh-aw-firewall/squid:0.18.0 ghcr.io/github/gh-aw-mcpg:v0.1.4 ghcr.io/github/github-mcp-server:v0.30.3 node:lts- - - name: Write Safe Outputs - run: | - mkdir -p /opt/gh-aw/ - mkdir -p /tmp/gh-aw/ - mkdir -p /tmp/gh-aw/mcp-logs/ - cat > /opt/gh-aw/safeoutputs/config.json << 'GH_AW_SAFE_OUTPUTS_CONFIG_EOF' - {"create_issue":{"max":1},"missing_data":{},"missing_tool":{},"noop":{"max":1}} - - cat > /opt/gh-aw/safeoutputs/tools.json << 'GH_AW_SAFE_OUTPUTS_TOOLS_EOF' - [ - { - "description": "Create a new GitHub issue for tracking bugs, feature requests, or tasks. Use this for actionable work items that need assignment, labeling, and status tracking. For reports, announcements, or status updates that don't require task tracking, use create_discussion instead. CONSTRAINTS: Maximum 1 issue(s) can be created. Title will be prefixed with \"[repo-status] \". Labels [report daily-status] will be automatically added.", - "inputSchema": { - "additionalProperties": false, - "properties": { - "body": { - "description": "Detailed issue description in Markdown. Do NOT repeat the title as a heading since it already appears as the issue's h1. Include context, reproduction steps, or acceptance criteria as appropriate.", - "type": "string" - }, - "labels": { - "description": "Labels to categorize the issue (e.g., 'bug', 'enhancement'). Labels must exist in the repository.", - "items": { - "type": "string" - }, - "type": "array" - }, - "parent": { - "description": "Parent issue number for creating sub-issues. 
This is the numeric ID from the GitHub URL (e.g., 42 in github.com/owner/repo/issues/42). Can also be a temporary_id (e.g., 'aw_abc123', 'aw_Test123') from a previously created issue in the same workflow run.", - "type": [ - "number", - "string" - ] - }, - "temporary_id": { - "description": "Unique temporary identifier for referencing this issue before it's created. Format: 'aw_' followed by 3 to 8 alphanumeric characters (e.g., 'aw_abc1', 'aw_Test123'). Use '#aw_ID' in body text to reference other issues by their temporary_id; these are replaced with actual issue numbers after creation.", - "pattern": "^aw_[A-Za-z0-9]{4,8}$", - "type": "string" - }, - "title": { - "description": "Concise issue title summarizing the bug, feature, or task. The title appears as the main heading, so keep it brief and descriptive.", - "type": "string" - } - }, - "required": [ - "title", - "body" - ], - "type": "object" - }, - "name": "create_issue" - }, - { - "description": "Report that a tool or capability needed to complete the task is not available, or share any information you deem important about missing functionality or limitations. Use this when you cannot accomplish what was requested because the required functionality is missing or access is restricted.", - "inputSchema": { - "additionalProperties": false, - "properties": { - "alternatives": { - "description": "Any workarounds, manual steps, or alternative approaches the user could take (max 256 characters).", - "type": "string" - }, - "reason": { - "description": "Explanation of why this tool is needed or what information you want to share about the limitation (max 256 characters).", - "type": "string" - }, - "tool": { - "description": "Optional: Name or description of the missing tool or capability (max 128 characters). 
Be specific about what functionality is needed.", - "type": "string" - } - }, - "required": [ - "reason" - ], - "type": "object" - }, - "name": "missing_tool" - }, - { - "description": "Log a transparency message when no significant actions are needed. Use this to confirm workflow completion and provide visibility when analysis is complete but no changes or outputs are required (e.g., 'No issues found', 'All checks passed'). This ensures the workflow produces human-visible output even when no other actions are taken.", - "inputSchema": { - "additionalProperties": false, - "properties": { - "message": { - "description": "Status or completion message to log. Should explain what was analyzed and the outcome (e.g., 'Code review complete - no issues found', 'Analysis complete - all tests passing').", - "type": "string" - } - }, - "required": [ - "message" - ], - "type": "object" - }, - "name": "noop" - }, - { - "description": "Report that data or information needed to complete the task is not available. Use this when you cannot accomplish what was requested because required data, context, or information is missing.", - "inputSchema": { - "additionalProperties": false, - "properties": { - "alternatives": { - "description": "Any workarounds, manual steps, or alternative approaches the user could take (max 256 characters).", - "type": "string" - }, - "context": { - "description": "Additional context about the missing data or where it should come from (max 256 characters).", - "type": "string" - }, - "data_type": { - "description": "Type or description of the missing data or information (max 128 characters). 
Be specific about what data is needed.", - "type": "string" - }, - "reason": { - "description": "Explanation of why this data is needed to complete the task (max 256 characters).", - "type": "string" - } - }, - "required": [], - "type": "object" - }, - "name": "missing_data" - } - ] - - cat > /opt/gh-aw/safeoutputs/validation.json << 'GH_AW_SAFE_OUTPUTS_VALIDATION_EOF' - { - "create_issue": { - "defaultMax": 1, - "fields": { - "body": { - "required": true, - "type": "string", - "sanitize": true, - "maxLength": - }, - "labels": { - "type": "array", - "itemType": "string", - "itemSanitize": true, - "itemMaxLength": - }, - "parent": { - "issueOrPRNumber": - }, - "repo": { - "type": "string", - "maxLength": - }, - "temporary_id": { - "type": "string" - }, - "title": { - "required": true, - "type": "string", - "sanitize": true, - "maxLength": - } - } - }, - "missing_tool": { - "defaultMax": 20, - "fields": { - "alternatives": { - "type": "string", - "sanitize": true, - "maxLength": - }, - "reason": { - "required": true, - "type": "string", - "sanitize": true, - "maxLength": - }, - "tool": { - "type": "string", - "sanitize": true, - "maxLength": - } - } - }, - "noop": { - "defaultMax": 1, - "fields": { - "message": { - "required": true, - "type": "string", - "sanitize": true, - "maxLength": - } - } - } - } - - - name: Generate Safe Outputs MCP Server - id: safe-outputs- - run: | - # Generate a secure random API key (360 bits of entropy, 40+ chars) - # Mask immediately to prevent timing - API_KEY=$(openssl rand -base64 45 | tr -d '/+=') - echo "::add-mask::${API_KEY}" - - PORT= - - # Set outputs for next - { - echo "safe_outputs_api_key=${API_KEY}" - echo "safe_outputs_port=${PORT}" - } >> "$GITHUB_OUTPUT" - - echo "Safe Outputs MCP server will run on port ${PORT}" - - - name: Start Safe Outputs MCP HTTP - id: safe-outputs- - env: - DEBUG: '*' - GH_AW_SAFE_OUTPUTS_PORT: ${{ steps.safe-outputs-config.outputs.safe_outputs_port }} - GH_AW_SAFE_OUTPUTS_API_KEY: ${{ 
steps.safe-outputs-config.outputs.safe_outputs_api_key }} - GH_AW_SAFE_OUTPUTS_TOOLS_PATH: /opt/gh-aw/safeoutputs/tools. - GH_AW_SAFE_OUTPUTS_CONFIG_PATH: /opt/gh-aw/safeoutputs/config. - GH_AW_MCP_LOG_DIR: /tmp/gh-aw/mcp-logs/ - run: | - # Environment variables are set above to prevent template - export - export - export - export - export - export - - bash /opt/gh-aw/actions/start_safe_outputs_server. - - - name: Start MCP - id: start-mcp- - env: - GH_AW_SAFE_OUTPUTS: ${{ env.GH_AW_SAFE_OUTPUTS }} - GH_AW_SAFE_OUTPUTS_API_KEY: ${{ steps.safe-outputs-start.outputs.api_key }} - GH_AW_SAFE_OUTPUTS_PORT: ${{ steps.safe-outputs-start.outputs.port }} - GITHUB_MCP_SERVER_TOKEN: ${{ secrets.GH_AW_GITHUB_MCP_SERVER_TOKEN || secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }} - run: | - set -eo - mkdir -p /tmp/gh-aw/mcp- - - # Export gateway environment variables for MCP config and gateway - export MCP_GATEWAY_PORT="80" - export MCP_GATEWAY_DOMAIN="host.docker.internal" - MCP_GATEWAY_API_KEY=$(openssl rand -base64 45 | tr -d '/+=') - echo "::add-mask::${MCP_GATEWAY_API_KEY}" - export - export MCP_GATEWAY_PAYLOAD_DIR="/tmp/gh-aw/mcp-payloads" - mkdir -p "${MCP_GATEWAY_PAYLOAD_DIR}" - export DEBUG="*" - - export GH_AW_ENGINE="copilot" - export MCP_GATEWAY_DOCKER_COMMAND='docker run -i --rm --network host -v /var/run/docker.sock:/var/run/docker.sock -e MCP_GATEWAY_PORT -e MCP_GATEWAY_DOMAIN -e MCP_GATEWAY_API_KEY -e MCP_GATEWAY_PAYLOAD_DIR -e DEBUG -e MCP_GATEWAY_LOG_DIR -e GH_AW_MCP_LOG_DIR -e GH_AW_SAFE_OUTPUTS -e GH_AW_SAFE_OUTPUTS_CONFIG_PATH -e GH_AW_SAFE_OUTPUTS_TOOLS_PATH -e GH_AW_ASSETS_BRANCH -e GH_AW_ASSETS_MAX_SIZE_KB -e GH_AW_ASSETS_ALLOWED_EXTS -e DEFAULT_BRANCH -e GITHUB_MCP_SERVER_TOKEN -e GITHUB_MCP_LOCKDOWN -e GITHUB_REPOSITORY -e GITHUB_SERVER_URL -e GITHUB_SHA -e GITHUB_WORKSPACE -e GITHUB_TOKEN -e GITHUB_RUN_ID -e GITHUB_RUN_NUMBER -e GITHUB_RUN_ATTEMPT -e GITHUB_JOB -e GITHUB_ACTION -e GITHUB_EVENT_NAME -e GITHUB_EVENT_PATH -e GITHUB_ACTOR -e 
GITHUB_ACTOR_ID -e GITHUB_TRIGGERING_ACTOR -e GITHUB_WORKFLOW -e GITHUB_WORKFLOW_REF -e GITHUB_WORKFLOW_SHA -e GITHUB_REF -e GITHUB_REF_NAME -e GITHUB_REF_TYPE -e GITHUB_HEAD_REF -e GITHUB_BASE_REF -e GH_AW_SAFE_OUTPUTS_PORT -e GH_AW_SAFE_OUTPUTS_API_KEY -v /tmp/gh-aw/mcp-payloads:/tmp/gh-aw/mcp-payloads:rw -v /opt:/opt:ro -v /tmp:/tmp:rw -v '"${GITHUB_WORKSPACE}"':'"${GITHUB_WORKSPACE}"':rw ghcr.io/github/gh-aw-mcpg:v0.1.4' - - mkdir -p /home/runner/. - cat << GH_AW_MCP_CONFIG_EOF | bash /opt/gh-aw/actions/start_mcp_gateway. - { - "mcpServers": { - "github": { - "type": "stdio", - "container": "ghcr.io/github/github-mcp-server:v0.30.3", - "env": { - "GITHUB_PERSONAL_ACCESS_TOKEN": "\${GITHUB_MCP_SERVER_TOKEN}", - "GITHUB_READ_ONLY": "1", - "GITHUB_TOOLSETS": "context,repos,issues,pull_requests" - } - }, - "safeoutputs": { - "type": "http", - "url": "http://host.docker.internal:$GH_AW_SAFE_OUTPUTS_PORT", - "headers": { - "Authorization": "\${GH_AW_SAFE_OUTPUTS_API_KEY}" - } - } - }, - "gateway": { - "port": $MCP_GATEWAY_PORT, - "domain": "${MCP_GATEWAY_DOMAIN}", - "apiKey": "${MCP_GATEWAY_API_KEY}", - "payloadDir": "${MCP_GATEWAY_PAYLOAD_DIR}" - } - } - - - name: Generate workflow - uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # - with: - script: | - const { generateWorkflowOverview } = require('/opt/gh-aw/actions/generate_workflow_overview.cjs'); - await generateWorkflowOverview(core); - - name: Create prompt with built-in - env: - GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt. 
- GH_AW_SAFE_OUTPUTS: ${{ env.GH_AW_SAFE_OUTPUTS }} - GH_AW_GITHUB_ACTOR: ${{ github.actor }} - GH_AW_GITHUB_EVENT_COMMENT_ID: ${{ github.event.comment.id }} - GH_AW_GITHUB_EVENT_DISCUSSION_NUMBER: ${{ github.event.discussion.number }} - GH_AW_GITHUB_EVENT_ISSUE_NUMBER: ${{ github.event.issue.number }} - GH_AW_GITHUB_EVENT_PULL_REQUEST_NUMBER: ${{ github.event.pull_request.number }} - GH_AW_GITHUB_REPOSITORY: ${{ github.repository }} - GH_AW_GITHUB_RUN_ID: ${{ github.run_id }} - GH_AW_GITHUB_WORKSPACE: ${{ github.workspace }} - run: | - bash /opt/gh-aw/actions/create_prompt_first. - cat << 'GH_AW_PROMPT_EOF' > "$GH_AW_PROMPT" - - - cat "/opt/gh-aw/prompts/xpia.md" >> "$GH_AW_PROMPT" - cat "/opt/gh-aw/prompts/temp_folder_prompt.md" >> "$GH_AW_PROMPT" - cat "/opt/gh-aw/prompts/markdown.md" >> "$GH_AW_PROMPT" - cat << 'GH_AW_PROMPT_EOF' >> "$GH_AW_PROMPT" - - GitHub API Access Instructions - - The gh CLI is NOT authenticated. Do NOT use gh commands for GitHub operations. - - - To create or modify GitHub resources (issues, discussions, pull requests, etc.), you MUST call the appropriate safe output tool. Simply writing content will NOT work - the workflow requires actual tool calls. - - Temporary IDs: Some safe output tools support a temporary ID field (usually named temporary_id) so you can reference newly-created items elsewhere in the SAME agent output (for example, using #aw_abc1 in a later body). 
- - **IMPORTANT - temporary_id format rules:** - - If you DON'T need to reference the item later, OMIT the temporary_id field entirely (it will be auto-generated if needed) - - If you DO need cross-references/chaining, you MUST match this EXACT validation regex: /^aw_[A-Za-z0-9]{3,8}$/ - - Format: aw_ prefix followed by 3 to 8 alphanumeric characters (A-Z, a-z, 0-9, case-insensitive) - - Valid alphanumeric characters: - - INVALID examples: aw_ab (too short), aw_123456789 (too long), aw_test-id (contains hyphen), aw_id_123 (contains underscore) - - VALID examples: aw_abc, aw_abc1, aw_Test123, aw_A1B2C3D4, - - To generate valid IDs: use 3-8 random alphanumeric characters or omit the field to let the system auto- - - Do NOT invent other aw_* formats — downstream steps will reject them with validation errors matching against /^aw_[A-Za-z0-9]{3,8}$/i. - - Discover available tools from the safeoutputs MCP server. - - **Critical**: Tool calls write structured data that downstream jobs process. Without tool calls, follow-up actions will be skipped. - - **Note**: If you made no other safe output tool calls during this workflow execution, call the "noop" tool to provide a status message indicating completion or that no actions were needed. 
- - - - The following GitHub context information is available for this workflow: - {{#if __GH_AW_GITHUB_ACTOR__ }} - - **actor**: - {{/if}} - {{#if __GH_AW_GITHUB_REPOSITORY__ }} - - **repository**: - {{/if}} - {{#if __GH_AW_GITHUB_WORKSPACE__ }} - - **workspace**: - {{/if}} - {{#if __GH_AW_GITHUB_EVENT_ISSUE_NUMBER__ }} - - **issue-number**: # - {{/if}} - {{#if __GH_AW_GITHUB_EVENT_DISCUSSION_NUMBER__ }} - - **discussion-number**: # - {{/if}} - {{#if __GH_AW_GITHUB_EVENT_PULL_REQUEST_NUMBER__ }} - - **pull-request-number**: # - {{/if}} - {{#if __GH_AW_GITHUB_EVENT_COMMENT_ID__ }} - - **comment-id**: - {{/if}} - {{#if __GH_AW_GITHUB_RUN_ID__ }} - - **workflow-run-id**: - {{/if}} - - - - cat << 'GH_AW_PROMPT_EOF' >> "$GH_AW_PROMPT" - - - cat << 'GH_AW_PROMPT_EOF' >> "$GH_AW_PROMPT" - {{#runtime-import .github/workflows/daily-repo-status.md}} - - - name: Substitute - uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # - env: - GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt. 
- GH_AW_GITHUB_ACTOR: ${{ github.actor }} - GH_AW_GITHUB_EVENT_COMMENT_ID: ${{ github.event.comment.id }} - GH_AW_GITHUB_EVENT_DISCUSSION_NUMBER: ${{ github.event.discussion.number }} - GH_AW_GITHUB_EVENT_ISSUE_NUMBER: ${{ github.event.issue.number }} - GH_AW_GITHUB_EVENT_PULL_REQUEST_NUMBER: ${{ github.event.pull_request.number }} - GH_AW_GITHUB_REPOSITORY: ${{ github.repository }} - GH_AW_GITHUB_RUN_ID: ${{ github.run_id }} - GH_AW_GITHUB_WORKSPACE: ${{ github.workspace }} - with: - script: | - const substitutePlaceholders = require('/opt/gh-aw/actions/substitute_placeholders.cjs'); - - // Call the substitution - return await substitutePlaceholders({ - file: process.env.GH_AW_PROMPT, - substitutions: { - GH_AW_GITHUB_ACTOR: process.env.GH_AW_GITHUB_ACTOR, - GH_AW_GITHUB_EVENT_COMMENT_ID: process.env.GH_AW_GITHUB_EVENT_COMMENT_ID, - GH_AW_GITHUB_EVENT_DISCUSSION_NUMBER: process.env.GH_AW_GITHUB_EVENT_DISCUSSION_NUMBER, - GH_AW_GITHUB_EVENT_ISSUE_NUMBER: process.env.GH_AW_GITHUB_EVENT_ISSUE_NUMBER, - GH_AW_GITHUB_EVENT_PULL_REQUEST_NUMBER: process.env.GH_AW_GITHUB_EVENT_PULL_REQUEST_NUMBER, - GH_AW_GITHUB_REPOSITORY: process.env.GH_AW_GITHUB_REPOSITORY, - GH_AW_GITHUB_RUN_ID: process.env.GH_AW_GITHUB_RUN_ID, - GH_AW_GITHUB_WORKSPACE: process.env. - } - }); - - name: Interpolate variables and render - uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # - env: - GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt. - with: - script: | - const { setupGlobals } = require('/opt/gh-aw/actions/setup_globals.cjs'); - setupGlobals(core, github, context, exec, io); - const { main } = require('/opt/gh-aw/actions/interpolate_prompt.cjs'); - await main(); - - name: Validate prompt - env: - GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt. - run: bash /opt/gh-aw/actions/validate_prompt_placeholders. - - name: Print - env: - GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt. - run: bash /opt/gh-aw/actions/print_prompt_summary. 
- - name: Clean git - run: bash /opt/gh-aw/actions/clean_git_credentials. - - name: Execute GitHub Copilot - id: - # Copilot CLI tool arguments (sorted): - timeout-minutes: - run: | - set -o - sudo -E awf --env-all --container-workdir "${GITHUB_WORKSPACE}" --allow-domains api.business.githubcopilot.com,api.enterprise.githubcopilot.com,api.github.com,api.githubcopilot.com,api.individual.githubcopilot.com,api.snapcraft.io,archive.ubuntu.com,azure.archive.ubuntu.com,crl.geotrust.com,crl.globalsign.com,crl.identrust.com,crl.sectigo.com,crl.thawte.com,crl.usertrust.com,crl.verisign.com,crl3.digicert.com,crl4.digicert.com,crls.ssl.com,github.com,host.docker.internal,json-schema.org,json.schemastore.org,keyserver.ubuntu.com,ocsp.digicert.com,ocsp.geotrust.com,ocsp.globalsign.com,ocsp.identrust.com,ocsp.sectigo.com,ocsp.ssl.com,ocsp.thawte.com,ocsp.usertrust.com,ocsp.verisign.com,packagecloud.io,packages.cloud.google.com,packages.microsoft.com,ppa.launchpad.net,raw.githubusercontent.com,registry.npmjs.org,s.symcb.com,s.symcd.com,security.ubuntu.com,telemetry.enterprise.githubcopilot.com,ts-crl.ws.symantec.com,ts-ocsp.ws.symantec.com --log-level info --proxy-logs-dir /tmp/gh-aw/sandbox/firewall/logs --enable-host-access --image-tag 0.18.0 --skip-pull \ - -- /bin/bash -c '/usr/local/bin/copilot --add-dir /tmp/gh-aw/ --log-level all --log-dir /tmp/gh-aw/sandbox/agent/logs/ --add-dir "${GITHUB_WORKSPACE}" --disable-builtin-mcps --allow-all-tools --allow-all-paths --share /tmp/gh-aw/sandbox/agent/logs/conversation.md --prompt "$(cat /tmp/gh-aw/aw-prompts/prompt.txt)"${GH_AW_MODEL_AGENT_COPILOT:+ --model "$GH_AW_MODEL_AGENT_COPILOT"}' 2>&1 | tee -a /tmp/gh-aw/agent-stdio. - env: - COPILOT_AGENT_RUNNER_TYPE: - COPILOT_GITHUB_TOKEN: ${{ secrets.COPILOT_GITHUB_TOKEN }} - GH_AW_MCP_CONFIG: /home/runner/.copilot/mcp-config. - GH_AW_MODEL_AGENT_COPILOT: ${{ vars.GH_AW_MODEL_AGENT_COPILOT || '' }} - GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt. 
- GH_AW_SAFE_OUTPUTS: ${{ env.GH_AW_SAFE_OUTPUTS }} - GITHUB_HEAD_REF: ${{ github.head_ref }} - GITHUB_REF_NAME: ${{ github.ref_name }} - GITHUB_STEP_SUMMARY: ${{ env.GITHUB_STEP_SUMMARY }} - GITHUB_WORKSPACE: ${{ github.workspace }} - XDG_CONFIG_HOME: /home/ - - name: Configure Git - env: - REPO_NAME: ${{ github.repository }} - SERVER_URL: ${{ github.server_url }} - run: | - git config --global user.email "github-actions[bot]@users.noreply.github.com" - git config --global user.name "github-actions[bot]" - # Re-authenticate git with GitHub - SERVER_URL_STRIPPED="${SERVER_URL#https://}" - git remote set-url origin "https://x-access-token:${{ github.token }}@${SERVER_URL_STRIPPED}/${REPO_NAME}.git" - echo "Git configured with standard GitHub Actions identity" - - name: Copy Copilot session state files to - if: always() - continue-on-error: - run: | - # Copy Copilot session state files to logs folder for artifact - # This ensures they are in /tmp/gh-aw/ where secret redaction can scan - SESSION_STATE_DIR="$HOME/.copilot/session-state" - LOGS_DIR="/tmp/gh-aw/sandbox/agent/logs" - - if [ -d "$SESSION_STATE_DIR" ]; - echo "Copying Copilot session state files from $SESSION_STATE_DIR to $LOGS_DIR" - mkdir -p "$LOGS_DIR" - cp -v "$SESSION_STATE_DIR"/*.jsonl "$LOGS_DIR/" 2>/dev/null || - echo "Session state files copied successfully" - - echo "No session-state directory found at $SESSION_STATE_DIR" - - - name: Stop MCP - if: always() - continue-on-error: - env: - MCP_GATEWAY_PORT: ${{ steps.start-mcp-gateway.outputs.gateway-port }} - MCP_GATEWAY_API_KEY: ${{ steps.start-mcp-gateway.outputs.gateway-api-key }} - GATEWAY_PID: ${{ steps.start-mcp-gateway.outputs.gateway-pid }} - run: | - bash /opt/gh-aw/actions/stop_mcp_gateway.sh "$GATEWAY_PID" - - name: Redact secrets in - if: always() - uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # - with: - script: | - const { setupGlobals } = require('/opt/gh-aw/actions/setup_globals.cjs'); - setupGlobals(core, 
github, context, exec, io); - const { main } = require('/opt/gh-aw/actions/redact_secrets.cjs'); - await main(); - env: - GH_AW_SECRET_NAMES: 'COPILOT_GITHUB_TOKEN,GH_AW_GITHUB_MCP_SERVER_TOKEN,GH_AW_GITHUB_TOKEN,GITHUB_TOKEN' - SECRET_COPILOT_GITHUB_TOKEN: ${{ secrets.COPILOT_GITHUB_TOKEN }} - SECRET_GH_AW_GITHUB_MCP_SERVER_TOKEN: ${{ secrets.GH_AW_GITHUB_MCP_SERVER_TOKEN }} - SECRET_GH_AW_GITHUB_TOKEN: ${{ secrets.GH_AW_GITHUB_TOKEN }} - SECRET_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - name: Upload Safe - if: always() - uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0. - with: - name: safe- - path: ${{ env.GH_AW_SAFE_OUTPUTS }} - if-no-files-found: - - name: Ingest agent - id: - if: always() - uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # - env: - GH_AW_SAFE_OUTPUTS: ${{ env.GH_AW_SAFE_OUTPUTS }} - GH_AW_ALLOWED_DOMAINS: "api.business.githubcopilot.com,api.enterprise.githubcopilot.com,api.github.com,api.githubcopilot.com,api.individual.githubcopilot.com,api.snapcraft.io,archive.ubuntu.com,azure.archive.ubuntu.com,crl.geotrust.com,crl.globalsign.com,crl.identrust.com,crl.sectigo.com,crl.thawte.com,crl.usertrust.com,crl.verisign.com,crl3.digicert.com,crl4.digicert.com,crls.ssl.com,github.com,host.docker.internal,json-schema.org,json.schemastore.org,keyserver.ubuntu.com,ocsp.digicert.com,ocsp.geotrust.com,ocsp.globalsign.com,ocsp.identrust.com,ocsp.sectigo.com,ocsp.ssl.com,ocsp.thawte.com,ocsp.usertrust.com,ocsp.verisign.com,packagecloud.io,packages.cloud.google.com,packages.microsoft.com,ppa.launchpad.net,raw.githubusercontent.com,registry.npmjs.org,s.symcb.com,s.symcd.com,security.ubuntu.com,telemetry.enterprise.githubcopilot.com,ts-crl.ws.symantec.com,ts-ocsp.ws.symantec.com" - GITHUB_SERVER_URL: ${{ github.server_url }} - GITHUB_API_URL: ${{ github.api_url }} - with: - script: | - const { setupGlobals } = require('/opt/gh-aw/actions/setup_globals.cjs'); - setupGlobals(core, github, context, exec, io); - 
const { main } = require('/opt/gh-aw/actions/collect_ndjson_output.cjs'); - await main(); - - name: Upload sanitized agent - if: always() && env. - uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0. - with: - name: agent- - path: ${{ env.GH_AW_AGENT_OUTPUT }} - if-no-files-found: - - name: Upload engine output - uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0. - with: - name: - path: | - /tmp/gh-aw/sandbox/agent/logs/ - /tmp/gh-aw/redacted-urls. - if-no-files-found: - - name: Parse agent logs for step - if: always() - uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # - env: - GH_AW_AGENT_OUTPUT: /tmp/gh-aw/sandbox/agent/logs/ - with: - script: | - const { setupGlobals } = require('/opt/gh-aw/actions/setup_globals.cjs'); - setupGlobals(core, github, context, exec, io); - const { main } = require('/opt/gh-aw/actions/parse_copilot_log.cjs'); - await main(); - - name: Parse MCP Gateway logs for step - if: always() - uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # - with: - script: | - const { setupGlobals } = require('/opt/gh-aw/actions/setup_globals.cjs'); - setupGlobals(core, github, context, exec, io); - const { main } = require('/opt/gh-aw/actions/parse_mcp_gateway_log.cjs'); - await main(); - - name: Print firewall - if: always() - continue-on-error: - env: - AWF_LOGS_DIR: /tmp/gh-aw/sandbox/firewall/ - run: | - # Fix permissions on firewall logs so they can be uploaded as - # AWF runs with sudo, creating files owned by - sudo chmod -R a+r /tmp/gh-aw/sandbox/firewall/logs 2>/dev/null || - # Only run awf logs summary if awf command exists (it may not be installed if workflow failed before install step) - if command -v awf &> /dev/null; - awf logs summary | tee -a "$GITHUB_STEP_SUMMARY" - - echo 'AWF binary not installed, skipping firewall log summary' - - - name: Upload agent - if: always() - continue-on-error: - uses: 
actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0. - with: - name: agent- - path: | - /tmp/gh-aw/aw-prompts/prompt. - /tmp/gh-aw/aw_info. - /tmp/gh-aw/mcp-logs/ - /tmp/gh-aw/sandbox/firewall/logs/ - /tmp/gh-aw/agent-stdio. - /tmp/gh-aw/agent/ - if-no-files-found: - - conclusion: - needs: - - - - - - - - - if: (always()) && (needs.agent.result != 'skipped') - runs-on: ubuntu- - permissions: - contents: - issues: - outputs: - noop_message: ${{ steps.noop.outputs.noop_message }} - tools_reported: ${{ steps.missing_tool.outputs.tools_reported }} - total_count: ${{ steps.missing_tool.outputs.total_count }} - steps: - - name: Setup - uses: github/gh-aw/actions/setup@58d1d157fbac0f1204798500faefc4f7461ebe28 # v0.45. - with: - destination: /opt/gh-aw/ - - name: Download agent output - continue-on-error: - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0. - with: - name: agent- - path: /tmp/gh-aw/safeoutputs/ - - name: Setup agent output environment - run: | - mkdir -p /tmp/gh-aw/safeoutputs/ - find "/tmp/gh-aw/safeoutputs/" -type f - - echo "GH_AW_AGENT_OUTPUT=/tmp/gh-aw/safeoutputs/agent_output.json" >> "$GITHUB_ENV" - - name: Process No-Op - id: - uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # - env: - GH_AW_AGENT_OUTPUT: ${{ env.GH_AW_AGENT_OUTPUT }} - GH_AW_NOOP_MAX: - GH_AW_WORKFLOW_NAME: "Daily Repo Status" - GH_AW_WORKFLOW_SOURCE: "githubnext/agentics/workflows/daily-repo-status.md@d19056381ba48cb1f7c78510c23069701fa7ae87" - GH_AW_WORKFLOW_SOURCE_URL: "${{ github.server_url }}/githubnext/agentics/tree/d19056381ba48cb1f7c78510c23069701fa7ae87/workflows/daily-repo-status.md" - with: - github-token: ${{ secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }} - script: | - const { setupGlobals } = require('/opt/gh-aw/actions/setup_globals.cjs'); - setupGlobals(core, github, context, exec, io); - const { main } = require('/opt/gh-aw/actions/noop.cjs'); - await main(); - - name: Record Missing - 
id: - uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # - env: - GH_AW_AGENT_OUTPUT: ${{ env.GH_AW_AGENT_OUTPUT }} - GH_AW_WORKFLOW_NAME: "Daily Repo Status" - GH_AW_WORKFLOW_SOURCE: "githubnext/agentics/workflows/daily-repo-status.md@d19056381ba48cb1f7c78510c23069701fa7ae87" - GH_AW_WORKFLOW_SOURCE_URL: "${{ github.server_url }}/githubnext/agentics/tree/d19056381ba48cb1f7c78510c23069701fa7ae87/workflows/daily-repo-status.md" - with: - github-token: ${{ secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }} - script: | - const { setupGlobals } = require('/opt/gh-aw/actions/setup_globals.cjs'); - setupGlobals(core, github, context, exec, io); - const { main } = require('/opt/gh-aw/actions/missing_tool.cjs'); - await main(); - - name: Handle Agent - id: - uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # - env: - GH_AW_AGENT_OUTPUT: ${{ env.GH_AW_AGENT_OUTPUT }} - GH_AW_WORKFLOW_NAME: "Daily Repo Status" - GH_AW_WORKFLOW_SOURCE: "githubnext/agentics/workflows/daily-repo-status.md@d19056381ba48cb1f7c78510c23069701fa7ae87" - GH_AW_WORKFLOW_SOURCE_URL: "${{ github.server_url }}/githubnext/agentics/tree/d19056381ba48cb1f7c78510c23069701fa7ae87/workflows/daily-repo-status.md" - GH_AW_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} - GH_AW_AGENT_CONCLUSION: ${{ needs.agent.result }} - GH_AW_WORKFLOW_ID: "daily-repo-status" - GH_AW_SECRET_VERIFICATION_RESULT: ${{ needs.agent.outputs.secret_verification_result }} - GH_AW_CHECKOUT_PR_SUCCESS: ${{ needs.agent.outputs.checkout_pr_success }} - with: - github-token: ${{ secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }} - script: | - const { setupGlobals } = require('/opt/gh-aw/actions/setup_globals.cjs'); - setupGlobals(core, github, context, exec, io); - const { main } = require('/opt/gh-aw/actions/handle_agent_failure.cjs'); - await main(); - - name: Handle No-Op - id: - uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd 
# - env: - GH_AW_AGENT_OUTPUT: ${{ env.GH_AW_AGENT_OUTPUT }} - GH_AW_WORKFLOW_NAME: "Daily Repo Status" - GH_AW_WORKFLOW_SOURCE: "githubnext/agentics/workflows/daily-repo-status.md@d19056381ba48cb1f7c78510c23069701fa7ae87" - GH_AW_WORKFLOW_SOURCE_URL: "${{ github.server_url }}/githubnext/agentics/tree/d19056381ba48cb1f7c78510c23069701fa7ae87/workflows/daily-repo-status.md" - GH_AW_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} - GH_AW_AGENT_CONCLUSION: ${{ needs.agent.result }} - GH_AW_NOOP_MESSAGE: ${{ steps.noop.outputs.noop_message }} - GH_AW_NOOP_REPORT_AS_ISSUE: "true" - with: - github-token: ${{ secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }} - script: | - const { setupGlobals } = require('/opt/gh-aw/actions/setup_globals.cjs'); - setupGlobals(core, github, context, exec, io); - const { main } = require('/opt/gh-aw/actions/handle_noop_message.cjs'); - await main(); - - detection: - needs: - if: needs.agent.outputs.output_types != '' || needs.agent.outputs.has_patch == 'true' - runs-on: ubuntu- - permissions: {} - concurrency: - group: "gh-aw-copilot-${{ github.workflow }}" - timeout-minutes: - outputs: - success: ${{ steps.parse_results.outputs.success }} - steps: - - name: Setup - uses: github/gh-aw/actions/setup@58d1d157fbac0f1204798500faefc4f7461ebe28 # v0.45. - with: - destination: /opt/gh-aw/ - - name: Download agent - continue-on-error: - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0. - with: - name: agent- - path: /tmp/gh-aw/threat-detection/ - - name: Download agent output - continue-on-error: - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0. 
- with: - name: agent- - path: /tmp/gh-aw/threat-detection/ - - name: Echo agent output - env: - AGENT_OUTPUT_TYPES: ${{ needs.agent.outputs.output_types }} - run: | - echo "Agent output-types: $AGENT_OUTPUT_TYPES" - - name: Setup threat - uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # - env: - WORKFLOW_NAME: "Daily Repo Status" - WORKFLOW_DESCRIPTION: "This workflow creates daily repo status reports. It gathers recent repository\nactivity (issues, PRs, discussions, releases, code changes) and generates\nengaging GitHub issues with productivity insights, community highlights,\nand project recommendations." - HAS_PATCH: ${{ needs.agent.outputs.has_patch }} - with: - script: | - const { setupGlobals } = require('/opt/gh-aw/actions/setup_globals.cjs'); - setupGlobals(core, github, context, exec, io); - const { main } = require('/opt/gh-aw/actions/setup_threat_detection.cjs'); - await main(); - - name: Ensure threat-detection directory and - run: | - mkdir -p /tmp/gh-aw/threat- - touch /tmp/gh-aw/threat-detection/detection. - - name: Validate COPILOT_GITHUB_TOKEN - id: validate- - run: /opt/gh-aw/actions/validate_multi_secret.sh COPILOT_GITHUB_TOKEN 'GitHub Copilot CLI' https://github.github.com/gh-aw/reference/engines/#github-copilot- - env: - COPILOT_GITHUB_TOKEN: ${{ secrets.COPILOT_GITHUB_TOKEN }} - - name: Install GitHub Copilot - run: /opt/gh-aw/actions/install_copilot_cli.sh 0.0. 
- - name: Execute GitHub Copilot - id: - # Copilot CLI tool arguments (sorted): - # --allow-tool shell(cat) - # --allow-tool shell(grep) - # --allow-tool shell(head) - # --allow-tool shell(jq) - # --allow-tool shell(ls) - # --allow-tool shell(tail) - # --allow-tool shell(wc) - timeout-minutes: - run: | - set -o - COPILOT_CLI_INSTRUCTION="$(cat /tmp/gh-aw/aw-prompts/prompt.txt)" - mkdir -p /tmp/ - mkdir -p /tmp/gh-aw/ - mkdir -p /tmp/gh-aw/agent/ - mkdir -p /tmp/gh-aw/sandbox/agent/logs/ - copilot --add-dir /tmp/ --add-dir /tmp/gh-aw/ --add-dir /tmp/gh-aw/agent/ --log-level all --log-dir /tmp/gh-aw/sandbox/agent/logs/ --disable-builtin-mcps --allow-tool 'shell(cat)' --allow-tool 'shell(grep)' --allow-tool 'shell(head)' --allow-tool 'shell(jq)' --allow-tool 'shell(ls)' --allow-tool 'shell(tail)' --allow-tool 'shell(wc)' --share /tmp/gh-aw/sandbox/agent/logs/conversation.md --prompt "$COPILOT_CLI_INSTRUCTION"${GH_AW_MODEL_DETECTION_COPILOT:+ --model "$GH_AW_MODEL_DETECTION_COPILOT"} 2>&1 | tee /tmp/gh-aw/threat-detection/detection. - env: - COPILOT_AGENT_RUNNER_TYPE: - COPILOT_GITHUB_TOKEN: ${{ secrets.COPILOT_GITHUB_TOKEN }} - GH_AW_MODEL_DETECTION_COPILOT: ${{ vars.GH_AW_MODEL_DETECTION_COPILOT || '' }} - GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt. - GITHUB_HEAD_REF: ${{ github.head_ref }} - GITHUB_REF_NAME: ${{ github.ref_name }} - GITHUB_STEP_SUMMARY: ${{ env.GITHUB_STEP_SUMMARY }} - GITHUB_WORKSPACE: ${{ github.workspace }} - XDG_CONFIG_HOME: /home/ - - name: Parse threat detection - id: - uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # - with: - script: | - const { setupGlobals } = require('/opt/gh-aw/actions/setup_globals.cjs'); - setupGlobals(core, github, context, exec, io); - const { main } = require('/opt/gh-aw/actions/parse_threat_detection_results.cjs'); - await main(); - - name: Upload threat detection - if: always() - uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0. - with: - name: threat-detection. 
- path: /tmp/gh-aw/threat-detection/detection. - if-no-files-found: - - safe_outputs: - needs: - - - - - if: ((!cancelled()) && (needs.agent.result != 'skipped')) && (needs.detection.outputs.success == 'true') - runs-on: ubuntu- - permissions: - contents: - issues: - timeout-minutes: - env: - GH_AW_ENGINE_ID: "copilot" - GH_AW_WORKFLOW_ID: "daily-repo-status" - GH_AW_WORKFLOW_NAME: "Daily Repo Status" - GH_AW_WORKFLOW_SOURCE: "githubnext/agentics/workflows/daily-repo-status.md@d19056381ba48cb1f7c78510c23069701fa7ae87" - GH_AW_WORKFLOW_SOURCE_URL: "${{ github.server_url }}/githubnext/agentics/tree/d19056381ba48cb1f7c78510c23069701fa7ae87/workflows/daily-repo-status.md" - outputs: - create_discussion_error_count: ${{ steps.process_safe_outputs.outputs.create_discussion_error_count }} - create_discussion_errors: ${{ steps.process_safe_outputs.outputs.create_discussion_errors }} - process_safe_outputs_processed_count: ${{ steps.process_safe_outputs.outputs.processed_count }} - process_safe_outputs_temporary_id_map: ${{ steps.process_safe_outputs.outputs.temporary_id_map }} - steps: - - name: Setup - uses: github/gh-aw/actions/setup@58d1d157fbac0f1204798500faefc4f7461ebe28 # v0.45. - with: - destination: /opt/gh-aw/ - - name: Download agent output - continue-on-error: - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0. 
- with: - name: agent- - path: /tmp/gh-aw/safeoutputs/ - - name: Setup agent output environment - run: | - mkdir -p /tmp/gh-aw/safeoutputs/ - find "/tmp/gh-aw/safeoutputs/" -type f - - echo "GH_AW_AGENT_OUTPUT=/tmp/gh-aw/safeoutputs/agent_output.json" >> "$GITHUB_ENV" - - name: Process Safe - id: - uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # - env: - GH_AW_AGENT_OUTPUT: ${{ env.GH_AW_AGENT_OUTPUT }} - GH_AW_SAFE_OUTPUTS_HANDLER_CONFIG: "{\"create_issue\":{\"labels\":[\"report\",\"daily-status\"],\"max\":1,\"title_prefix\":\"[repo-status] \"},\"missing_data\":{},\"missing_tool\":{}}" - with: - github-token: ${{ secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }} - script: | - const { setupGlobals } = require('/opt/gh-aw/actions/setup_globals.cjs'); - setupGlobals(core, github, context, exec, io); - const { main } = require('/opt/gh-aw/actions/safe_output_handler_manager.cjs'); - await main(); diff --git a/.github/workflows/daily-repo-status.md b/.github/workflows/daily-repo-status.md deleted file mode 100644 index 431b4afb91a6..000000000000 --- a/.github/workflows/daily-repo-status.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -description: | - This workflow creates daily repo status reports. It gathers recent repository - activity (issues, PRs, discussions, releases, code changes) and generates - engaging GitHub issues with productivity insights, community highlights, - and project recommendations. - -on: - schedule: daily - workflow_dispatch: - -permissions: - contents: read - issues: read - pull-requests: read - -network: defaults - -tools: - github: - # If in a public repo, setting `lockdown: false` allows - # reading issues, pull requests and comments from 3rd-parties - # If in a private repo this has no particular effect. 
- lockdown: false - -safe-outputs: - create-issue: - title-prefix: "[repo-status] " - labels: [report, daily-status] -source: githubnext/agentics/workflows/daily-repo-status.md@d19056381ba48cb1f7c78510c23069701fa7ae87 ---- - -# Daily Repo Status - -Create an upbeat daily status report for the repo as a GitHub issue. - -## What to include - -- Recent repository activity (issues, PRs, discussions, releases, code changes) -- Progress tracking, goal reminders and highlights -- Project status and recommendations -- Actionable next steps for maintainers - -## Style - -- Be positive, encouraging, and helpful 🌟 -- Use emojis moderately for engagement -- Keep it concise - adjust length based on actual activity - -## Process - -1. Gather recent activity from the repository -2. Study the repository, its issues and its pull requests -3. Create a new GitHub issue with your findings and insights diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/KvmFileBasedStorageVmSnapshotStrategy.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/KvmFileBasedStorageVmSnapshotStrategy.java index 003065e394f5..d893304cc197 100644 --- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/KvmFileBasedStorageVmSnapshotStrategy.java +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/KvmFileBasedStorageVmSnapshotStrategy.java @@ -77,6 +77,8 @@ public class KvmFileBasedStorageVmSnapshotStrategy extends StorageVMSnapshotStra private static final List supportedStoragePoolTypes = List.of(Storage.StoragePoolType.Filesystem, Storage.StoragePoolType.NetworkFilesystem, Storage.StoragePoolType.SharedMountPoint); + private static final String ONTAP_PROVIDER_NAME = "NetApp ONTAP"; + @Inject protected SnapshotDataStoreDao snapshotDataStoreDao; @@ -325,6 +327,11 @@ public StrategyPriority canHandle(Long vmId, Long rootPoolId, boolean snapshotMe List volumes = 
volumeDao.findByInstance(vmId); for (VolumeVO volume : volumes) { StoragePoolVO storagePoolVO = storagePool.findById(volume.getPoolId()); + if (storagePoolVO.isManaged() && ONTAP_PROVIDER_NAME.equals(storagePoolVO.getStorageProviderName())) { + logger.debug(String.format("%s as the VM has a volume on ONTAP managed storage pool [%s]. " + + "ONTAP managed storage has its own dedicated VM snapshot strategy.", cantHandleLog, storagePoolVO.getName())); + return StrategyPriority.CANT_HANDLE; + } if (!supportedStoragePoolTypes.contains(storagePoolVO.getPoolType())) { logger.debug(String.format("%s as the VM has a volume that is in a storage with unsupported type [%s].", cantHandleLog, storagePoolVO.getPoolType())); return StrategyPriority.CANT_HANDLE; @@ -503,8 +510,9 @@ protected VMSnapshot takeVmSnapshotInternal(VMSnapshot vmSnapshot, Map volSizeAndNewPath = mapVolumeToSnapshotSizeAndNewVolumePath.get(volumeObjectTO.getUuid()); - PrimaryDataStoreTO primaryDataStoreTO = (PrimaryDataStoreTO) volumeObjectTO.getDataStore(); - KVMStoragePool kvmStoragePool = storagePoolMgr.getStoragePool(primaryDataStoreTO.getPoolType(), primaryDataStoreTO.getUuid()); - - if (volSizeAndNewPath == null) { - continue; - } - try { - Files.deleteIfExists(Path.of(kvmStoragePool.getLocalPathFor(volSizeAndNewPath.second()))); - } catch (IOException ex) { - logger.warn("Tried to delete leftover snapshot at [{}] failed.", volSizeAndNewPath.second(), ex); - } - } + cleanupLeftoverDeltas(volumeObjectTos, mapVolumeToSnapshotSizeAndNewVolumePath, storagePoolMgr); return new Answer(cmd, e); + } catch (Exception e) { + logger.error("Unexpected exception while creating disk-only VM snapshot for VM [{}]. 
Deleting leftover deltas.", vmName, e); + cleanupLeftoverDeltas(volumeObjectTos, mapVolumeToSnapshotSizeAndNewVolumePath, storagePoolMgr); + return new CreateDiskOnlyVmSnapshotAnswer(cmd, false, + String.format("Creation of disk-only VM snapshot for VM [%s] failed due to %s.", vmName, e.getMessage()), null); } return new CreateDiskOnlyVmSnapshotAnswer(cmd, true, null, mapVolumeToSnapshotSizeAndNewVolumePath); @@ -192,6 +188,23 @@ protected Pair>> createSnapshotXmlAndNewV return new Pair<>(snapshotXml, volumeObjectToNewPathMap); } + protected void cleanupLeftoverDeltas(List volumeObjectTos, Map> mapVolumeToSnapshotSizeAndNewVolumePath, KVMStoragePoolManager storagePoolMgr) { + for (VolumeObjectTO volumeObjectTO : volumeObjectTos) { + Pair volSizeAndNewPath = mapVolumeToSnapshotSizeAndNewVolumePath.get(volumeObjectTO.getUuid()); + PrimaryDataStoreTO primaryDataStoreTO = (PrimaryDataStoreTO) volumeObjectTO.getDataStore(); + KVMStoragePool kvmStoragePool = storagePoolMgr.getStoragePool(primaryDataStoreTO.getPoolType(), primaryDataStoreTO.getUuid()); + + if (volSizeAndNewPath == null) { + continue; + } + try { + Files.deleteIfExists(Path.of(kvmStoragePool.getLocalPathFor(volSizeAndNewPath.second()))); + } catch (IOException ex) { + logger.warn("Tried to delete leftover snapshot at [{}] failed.", volSizeAndNewPath.second(), ex); + } + } + } + protected long getFileSize(String path) { return new File(path).length(); } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/IscsiAdmStorageAdaptor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/IscsiAdmStorageAdaptor.java index ba689d5107f7..d0a639715034 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/IscsiAdmStorageAdaptor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/IscsiAdmStorageAdaptor.java @@ -19,6 +19,8 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import 
java.nio.file.Files; +import java.nio.file.Paths; import org.apache.cloudstack.utils.qemu.QemuImg; import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; @@ -96,10 +98,15 @@ public boolean connectPhysicalDisk(String volumeUuid, KVMStoragePool pool, Map 0) { @@ -238,6 +278,15 @@ public KVMPhysicalDisk getPhysicalDisk(String volumeUuid, KVMStoragePool pool) { } private long getDeviceSize(String deviceByPath) { + try { + if (!Files.exists(Paths.get(deviceByPath))) { + logger.debug("Device by-path does not exist yet: " + deviceByPath); + return 0L; + } + } catch (Exception ignore) { + // If FS check fails for any reason, fall back to blockdev call + } + Script iScsiAdmCmd = new Script(true, "blockdev", 0, logger); iScsiAdmCmd.add("--getsize64", deviceByPath); @@ -280,8 +329,96 @@ private String getComponent(String path, int index) { return tmp[index].trim(); } + /** + * Check if there are other LUNs on the same iSCSI target (IQN) that are still + * visible as block devices. This is needed because ONTAP uses a single IQN per + * SVM — logging out of the target would kill ALL LUNs, not just the one being + * disconnected. + * + * Checks /dev/disk/by-path/ for symlinks matching the same host:port + IQN but + * with a different LUN number. + */ + private boolean hasOtherActiveLuns(String host, int port, String iqn, String lun) { + String prefix = "ip-" + host + ":" + port + "-iscsi-" + iqn + "-lun-"; + java.io.File byPathDir = new java.io.File("/dev/disk/by-path"); + if (!byPathDir.exists() || !byPathDir.isDirectory()) { + return false; + } + java.io.File[] entries = byPathDir.listFiles(); + if (entries == null) { + return false; + } + for (java.io.File entry : entries) { + String name = entry.getName(); + // Skip partition entries (e.g. lun-0-part1, lun-0-part2) — these are not + // independent LUNs, they are partition symlinks for the same LUN disk. + // Only count actual LUN entries (no "-part" suffix after the lun number). 
+ if (name.startsWith(prefix) && !name.equals(prefix + lun) && !name.contains("-part")) { + logger.debug("Found other active LUN on same target: " + name); + return true; + } + } + return false; + } + + /** + * Removes a single stale SCSI device from the kernel using the sysfs interface. + * + * When ONTAP unmaps a LUN from the host's igroup, the by-path symlink and the + * underlying SCSI device (/dev/sdX) remain present in the kernel until explicitly + * removed — the kernel does not auto-remove devices from live iSCSI sessions. + * + * This method resolves the by-path symlink to the real block device name (e.g. sdd), + * then writes "1" to /sys/block//device/delete — the standard Linux kernel SCSI + * API for removing a single device without tearing down the entire iSCSI session. + * Once the kernel processes the delete, it also removes the by-path symlink. + * + * This is used instead of iscsiadm --logout when other LUNs on the same IQN are still + * active (ONTAP single-IQN-per-SVM model), since logout would tear down ALL LUNs. 
+ */ + private void removeStaleScsiDevice(String host, int port, String iqn, String lun) { + String byPath = getByPath(host, port, "/" + iqn + "/" + lun); + java.nio.file.Path byPathLink = java.nio.file.Paths.get(byPath); + if (!java.nio.file.Files.exists(byPathLink)) { + logger.debug("by-path entry for LUN " + lun + " already gone, nothing to remove"); + return; + } + try { + java.nio.file.Path realDevice = byPathLink.toRealPath(); + String devName = realDevice.getFileName().toString(); + java.io.File deleteFile = new java.io.File("/sys/block/" + devName + "/device/delete"); + if (!deleteFile.exists()) { + logger.warn("sysfs delete entry not found for device " + devName + " — cannot remove stale SCSI device"); + return; + } + try (java.io.FileWriter fw = new java.io.FileWriter(deleteFile)) { + fw.write("1"); + } + logger.info("Removed stale SCSI device " + devName + " for LUN /" + iqn + "/" + lun + " via sysfs"); + } catch (Exception e) { + logger.warn("Failed to remove stale SCSI device for LUN /" + iqn + "/" + lun + ": " + e.getMessage()); + } + } + private boolean disconnectPhysicalDisk(String host, int port, String iqn, String lun) { - // use iscsiadm to log out of the iSCSI target and un-discover it + // Check if other LUNs on the same IQN target are still in use. + // ONTAP (and similar) uses a single IQN per SVM with multiple LUNs. + // Doing iscsiadm --logout tears down the ENTIRE target session, + // which would destroy access to ALL LUNs — not just the one being disconnected. + if (hasOtherActiveLuns(host, port, iqn, lun)) { + logger.info("Skipping iSCSI logout for /" + iqn + "/" + lun + + " — other LUNs on the same target are still active. Removing stale SCSI device for this LUN only."); + removeStaleScsiDevice(host, port, iqn, lun); + // After removing this LUN's device, re-check: if no other LUNs remain active, + // If it is the last one then must logout to clean up the iSCSI session entirely. 
+ if (hasOtherActiveLuns(host, port, iqn, lun)) { + logger.info("Other LUNs still active after removing /" + iqn + "/" + lun + " — session kept alive."); + return true; + } + logger.info("No more active LUNs on target after removing /" + iqn + "/" + lun + " — proceeding with iSCSI logout."); + } + + // No other LUNs active on this target — safe to logout and delete the node record. // ex. sudo iscsiadm -m node -T iqn.2012-03.com.test:volume1 -p 192.168.233.10:3260 --logout Script iScsiAdmCmd = new Script(true, "iscsiadm", 0, logger); @@ -422,6 +559,19 @@ public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk srcDisk, String destVolu try { QemuImg q = new QemuImg(timeout); q.convert(srcFile, destFile); + // Below fix is required when vendor depends on host based copy rather than storage CAN_CREATE_VOLUME_FROM_VOLUME capability + // When host based template copy is triggered , small size template sits in RAM(depending on host memory and RAM) and copy is marked successful and by the time flush to storage is triggered + // disconnectPhysicalDisk would disconnect the lun , hence template staying in RAM is not copied to storage lun. Below does flushing of data to storage and marking + // copy as successful once flush is complete. + Script flushCmd = new Script(true, "blockdev", 0, logger); + flushCmd.add("--flushbufs", destDisk.getPath()); + String flushResult = flushCmd.execute(); + if (flushResult != null) { + logger.warn("iSCSI copyPhysicalDisk: blockdev --flushbufs returned: {}", flushResult); + } + Script syncCmd = new Script(true, "sync", 0, logger); + syncCmd.execute(); + logger.info("iSCSI copyPhysicalDisk: flush/sync completed "); } catch (QemuImgException | LibvirtException ex) { String msg = "Failed to copy data from " + srcDisk.getPath() + " to " + destDisk.getPath() + ". 
The error was the following: " + ex.getMessage(); diff --git a/plugins/storage/volume/ontap/pom.xml b/plugins/storage/volume/ontap/pom.xml index 749d876911b8..12035f01d7f9 100644 --- a/plugins/storage/volume/ontap/pom.xml +++ b/plugins/storage/volume/ontap/pom.xml @@ -39,6 +39,7 @@ 5.8.1 3.12.4 5.2.0 + 1.11.13 @@ -121,12 +122,24 @@ ${mockito.version} test + + net.bytebuddy + byte-buddy-agent + ${byte-buddy-agent.version} + test + org.assertj assertj-core ${assertj.version} test + + org.apache.cloudstack + cloud-engine-storage-snapshot + ${project.version} + compile + @@ -151,6 +164,7 @@ ${maven-surefire-plugin.version} false + -javaagent:${settings.localRepository}/net/bytebuddy/byte-buddy-agent/${byte-buddy-agent.version}/byte-buddy-agent-${byte-buddy-agent.version}.jar **/*Test.java diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java index 305db1b1f2fa..8a47c93ab718 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java @@ -18,13 +18,25 @@ */ package org.apache.cloudstack.storage.driver; +import org.apache.cloudstack.storage.utils.OntapStorageConstants; +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.to.DataObjectType; import com.cloud.agent.api.to.DataStoreTO; import com.cloud.agent.api.to.DataTO; +import com.cloud.exception.InvalidParameterValueException; import com.cloud.host.Host; +import com.cloud.host.HostVO; import com.cloud.storage.Storage; import com.cloud.storage.StoragePool; import com.cloud.storage.Volume; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.ScopeType; +import com.cloud.storage.dao.SnapshotDetailsDao; +import 
com.cloud.storage.dao.SnapshotDetailsVO; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.storage.dao.VolumeDetailsDao; import com.cloud.utils.Pair; +import com.cloud.utils.exception.CloudRuntimeException; import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo; import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; @@ -37,23 +49,54 @@ import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; import org.apache.cloudstack.storage.command.CommandResult; +import org.apache.cloudstack.storage.command.CreateObjectAnswer; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.feign.model.Igroup; +import org.apache.cloudstack.storage.feign.client.SnapshotFeignClient; +import org.apache.cloudstack.storage.feign.model.FlexVolSnapshot; +import org.apache.cloudstack.storage.feign.model.Lun; +import org.apache.cloudstack.storage.feign.model.response.JobResponse; +import org.apache.cloudstack.storage.feign.model.response.OntapResponse; +import org.apache.cloudstack.storage.service.SANStrategy; +import org.apache.cloudstack.storage.service.StorageStrategy; +import org.apache.cloudstack.storage.service.UnifiedSANStrategy; +import org.apache.cloudstack.storage.service.model.AccessGroup; +import org.apache.cloudstack.storage.service.model.CloudStackVolume; +import org.apache.cloudstack.storage.service.model.ProtocolType; +import org.apache.cloudstack.storage.to.SnapshotObjectTO; +import org.apache.cloudstack.storage.utils.OntapStorageUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import 
javax.inject.Inject; +import java.util.ArrayList; import java.util.HashMap; +import java.util.List; import java.util.Map; +/** + * Primary datastore driver for NetApp ONTAP storage systems. + * Handles volume lifecycle operations for iSCSI and NFS protocols. + */ public class OntapPrimaryDatastoreDriver implements PrimaryDataStoreDriver { private static final Logger logger = LogManager.getLogger(OntapPrimaryDatastoreDriver.class); + @Inject private StoragePoolDetailsDao storagePoolDetailsDao; + @Inject private PrimaryDataStoreDao storagePoolDao; + @Inject private VolumeDao volumeDao; + @Inject private VolumeDetailsDao volumeDetailsDao; + @Inject private SnapshotDetailsDao snapshotDetailsDao; + @Override public Map getCapabilities() { logger.trace("OntapPrimaryDatastoreDriver: getCapabilities: Called"); Map mapCapabilities = new HashMap<>(); - mapCapabilities.put(DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT.toString(), Boolean.FALSE.toString()); - mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_SNAPSHOT.toString(), Boolean.FALSE.toString()); - + mapCapabilities.put(DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT.toString(), Boolean.TRUE.toString()); + mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_SNAPSHOT.toString(), Boolean.TRUE.toString()); return mapCapabilities; } @@ -65,14 +108,235 @@ public DataTO getTO(DataObject data) { @Override public DataStoreTO getStoreTO(DataStore store) { return null; } + @Override + public boolean volumesRequireGrantAccessWhenUsed(){ + logger.info("OntapPrimaryDatastoreDriver: volumesRequireGrantAccessWhenUsed: Called"); + return true; + } + + /** + * Creates a volume on the ONTAP storage system. 
+ */ @Override public void createAsync(DataStore dataStore, DataObject dataObject, AsyncCompletionCallback callback) { - throw new UnsupportedOperationException("Create operation is not supported for ONTAP primary storage."); + CreateCmdResult createCmdResult = null; + String errMsg; + + if (dataObject == null) { + throw new InvalidParameterValueException("dataObject should not be null"); + } + if (dataStore == null) { + throw new InvalidParameterValueException("dataStore should not be null"); + } + if (callback == null) { + throw new InvalidParameterValueException("callback should not be null"); + } + + try { + logger.info("Started for data store name [{}] and data object name [{}] of type [{}]", + dataStore.getName(), dataObject.getName(), dataObject.getType()); + + StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId()); + if (storagePool == null) { + logger.error("createAsync: Storage Pool not found for id: " + dataStore.getId()); + throw new CloudRuntimeException("Storage Pool not found for id: " + dataStore.getId()); + } + String storagePoolUuid = dataStore.getUuid(); + + Map details = storagePoolDetailsDao.listDetailsKeyPairs(dataStore.getId()); + + if (dataObject.getType() == DataObjectType.VOLUME) { + VolumeInfo volInfo = (VolumeInfo) dataObject; + + // Create the backend storage object (LUN for iSCSI, no-op for NFS) + CloudStackVolume created = createCloudStackVolume(dataStore, volInfo, details); + + // Update CloudStack volume record with storage pool association and protocol-specific details + VolumeVO volumeVO = volumeDao.findById(volInfo.getId()); + if (volumeVO != null) { + volumeVO.setPoolType(storagePool.getPoolType()); + volumeVO.setPoolId(storagePool.getId()); + + if (ProtocolType.ISCSI.name().equalsIgnoreCase(details.get(OntapStorageConstants.PROTOCOL))) { + String lunName = created != null && created.getLun() != null ? 
created.getLun().getName() : null; + if (lunName == null) { + throw new CloudRuntimeException("Missing LUN name for volume " + volInfo.getId()); + } + + // Persist LUN details for future operations (delete, grant/revoke access) + volumeDetailsDao.addDetail(volInfo.getId(), OntapStorageConstants.LUN_DOT_UUID, created.getLun().getUuid(), false); + volumeDetailsDao.addDetail(volInfo.getId(), OntapStorageConstants.LUN_DOT_NAME, lunName, false); + if (created.getLun().getUuid() != null) { + volumeVO.setFolder(created.getLun().getUuid()); + } + + logger.info("createAsync: Created LUN [{}] for volume [{}]. LUN mapping will occur during grantAccess() to per-host igroup.", + lunName, volumeVO.getId()); + createCmdResult = new CreateCmdResult(lunName, new Answer(null, true, null)); + } else if (ProtocolType.NFS3.name().equalsIgnoreCase(details.get(OntapStorageConstants.PROTOCOL))) { + createCmdResult = new CreateCmdResult(volInfo.getUuid(), new Answer(null, true, null)); + logger.info("createAsync: Managed NFS volume [{}] with path [{}] associated with pool {}", + volumeVO.getId(), volInfo.getUuid(), storagePool.getId()); + } + volumeDao.update(volumeVO.getId(), volumeVO); + } + } else { + errMsg = "Invalid DataObjectType (" + dataObject.getType() + ") passed to createAsync"; + logger.error(errMsg); + throw new CloudRuntimeException(errMsg); + } + } catch (Exception e) { + errMsg = e.getMessage(); + logger.error("createAsync: Failed for dataObject name [{}]: {}", dataObject.getName(), errMsg); + createCmdResult = new CreateCmdResult(null, new Answer(null, false, errMsg)); + createCmdResult.setResult(e.toString()); + } finally { + if (createCmdResult != null && createCmdResult.isSuccess()) { + logger.info("createAsync: Operation completed successfully for {}", dataObject.getType()); + } + callback.complete(createCmdResult); + } + } + + /** + * Creates a volume on the ONTAP backend. 
+ */ + private CloudStackVolume createCloudStackVolume(DataStore dataStore, DataObject dataObject, Map details) { + StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId()); + if (storagePool == null) { + logger.error("createCloudStackVolume: Storage Pool not found for id: {}", dataStore.getId()); + throw new CloudRuntimeException("Storage Pool not found for id: " + dataStore.getId()); + } + + StorageStrategy storageStrategy = OntapStorageUtils.getStrategyByStoragePoolDetails(details); + + if (dataObject.getType() == DataObjectType.VOLUME) { + VolumeInfo volumeObject = (VolumeInfo) dataObject; + CloudStackVolume cloudStackVolumeRequest = OntapStorageUtils.createCloudStackVolumeRequestByProtocol(storagePool, details, volumeObject); + return storageStrategy.createCloudStackVolume(cloudStackVolumeRequest); + } else { + throw new CloudRuntimeException("Unsupported DataObjectType: " + dataObject.getType()); + } } + /** + * Deletes a volume or snapshot from the ONTAP storage system. + * + *

For volumes, deletes the backend storage object (LUN for iSCSI, no-op for NFS). + * For snapshots, deletes the FlexVolume snapshot from ONTAP that was created by takeSnapshot.

+ */ @Override public void deleteAsync(DataStore store, DataObject data, AsyncCompletionCallback callback) { - throw new UnsupportedOperationException("Delete operation is not supported for ONTAP primary storage."); + CommandResult commandResult = new CommandResult(); + try { + if (store == null || data == null) { + throw new CloudRuntimeException("store or data is null"); + } + + if (data.getType() == DataObjectType.VOLUME) { + StoragePoolVO storagePool = storagePoolDao.findById(store.getId()); + if (storagePool == null) { + logger.error("deleteAsync: Storage Pool not found for id: " + store.getId()); + throw new CloudRuntimeException("Storage Pool not found for id: " + store.getId()); + } + Map details = storagePoolDetailsDao.listDetailsKeyPairs(store.getId()); + StorageStrategy storageStrategy = OntapStorageUtils.getStrategyByStoragePoolDetails(details); + logger.info("createCloudStackVolumeForTypeVolume: Connection to Ontap SVM [{}] successful, preparing CloudStackVolumeRequest", details.get(OntapStorageConstants.SVM_NAME)); + VolumeInfo volumeInfo = (VolumeInfo) data; + CloudStackVolume cloudStackVolumeRequest = createDeleteCloudStackVolumeRequest(storagePool,details,volumeInfo); + storageStrategy.deleteCloudStackVolume(cloudStackVolumeRequest); + logger.info("deleteAsync: Volume deleted: " + volumeInfo.getId()); + commandResult.setResult(null); + commandResult.setSuccess(true); + } else if (data.getType() == DataObjectType.SNAPSHOT) { + // Delete the ONTAP FlexVolume snapshot that was created by takeSnapshot + deleteOntapSnapshot((SnapshotInfo) data, commandResult); + } else { + throw new CloudRuntimeException("Unsupported data object type: " + data.getType()); + } + } catch (Exception e) { + logger.error("deleteAsync: Failed for data object [{}]: {}", data, e.getMessage()); + commandResult.setSuccess(false); + commandResult.setResult(e.getMessage()); + } finally { + callback.complete(commandResult); + } + } + + /** + * Deletes an ONTAP FlexVolume snapshot. 
+ * + *

Retrieves the snapshot details stored during takeSnapshot and calls the ONTAP + * REST API to delete the FlexVolume snapshot.

+ * + * @param snapshotInfo The CloudStack snapshot to delete + * @param commandResult Result object to populate with success/failure + */ + private void deleteOntapSnapshot(SnapshotInfo snapshotInfo, CommandResult commandResult) { + long snapshotId = snapshotInfo.getId(); + logger.info("deleteOntapSnapshot: Deleting ONTAP FlexVolume snapshot for CloudStack snapshot [{}]", snapshotId); + + try { + // Retrieve snapshot details stored during takeSnapshot + String flexVolUuid = getSnapshotDetail(snapshotId, OntapStorageConstants.BASE_ONTAP_FV_ID); + String ontapSnapshotUuid = getSnapshotDetail(snapshotId, OntapStorageConstants.ONTAP_SNAP_ID); + String snapshotName = getSnapshotDetail(snapshotId, OntapStorageConstants.ONTAP_SNAP_NAME); + String poolIdStr = getSnapshotDetail(snapshotId, OntapStorageConstants.PRIMARY_POOL_ID); + + if (flexVolUuid == null || ontapSnapshotUuid == null) { + logger.warn("deleteOntapSnapshot: Missing ONTAP snapshot details for snapshot [{}]. " + + "flexVolUuid={}, ontapSnapshotUuid={}. 
Snapshot may have been created by a different method or already deleted.", + snapshotId, flexVolUuid, ontapSnapshotUuid); + // Consider this a success since there's nothing to delete on ONTAP + commandResult.setSuccess(true); + commandResult.setResult(null); + return; + } + + long poolId = Long.parseLong(poolIdStr); + Map poolDetails = storagePoolDetailsDao.listDetailsKeyPairs(poolId); + + StorageStrategy storageStrategy = OntapStorageUtils.getStrategyByStoragePoolDetails(poolDetails); + SnapshotFeignClient snapshotClient = storageStrategy.getSnapshotFeignClient(); + String authHeader = storageStrategy.getAuthHeader(); + + logger.info("deleteOntapSnapshot: Deleting ONTAP snapshot [{}] (uuid={}) from FlexVol [{}]", + snapshotName, ontapSnapshotUuid, flexVolUuid); + + // Call ONTAP REST API to delete the snapshot + JobResponse jobResponse = snapshotClient.deleteSnapshot(authHeader, flexVolUuid, ontapSnapshotUuid); + + if (jobResponse != null && jobResponse.getJob() != null) { + // Poll for job completion + Boolean jobSucceeded = storageStrategy.jobPollForSuccess(jobResponse.getJob().getUuid(), 30, 2); + if (!jobSucceeded) { + throw new CloudRuntimeException("Delete job failed for snapshot [" + + snapshotName + "] on FlexVol [" + flexVolUuid + "]"); + } + } + + logger.info("deleteOntapSnapshot: Successfully deleted ONTAP snapshot [{}] (uuid={}) for CloudStack snapshot [{}]", + snapshotName, ontapSnapshotUuid, snapshotId); + + commandResult.setSuccess(true); + commandResult.setResult(null); + + } catch (Exception e) { + // Check if the error indicates snapshot doesn't exist (already deleted) + String errorMsg = e.getMessage(); + if (errorMsg != null && (errorMsg.contains("404") || errorMsg.contains("not found") || + errorMsg.contains("does not exist"))) { + logger.warn("deleteOntapSnapshot: ONTAP snapshot for CloudStack snapshot [{}] not found, " + + "may have been already deleted. 
Treating as success.", snapshotId); + commandResult.setSuccess(true); + commandResult.setResult(null); + } else { + logger.error("deleteOntapSnapshot: Failed to delete ONTAP snapshot for CloudStack snapshot [{}]: {}", + snapshotId, e.getMessage(), e); + commandResult.setSuccess(false); + commandResult.setResult(e.getMessage()); + } + } } @Override @@ -98,14 +362,234 @@ public ChapInfo getChapInfo(DataObject dataObject) { return null; } + /** + * Grants a host access to a volume. + */ @Override public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore) { - return false; + try { + if (dataStore == null) { + throw new InvalidParameterValueException("dataStore should not be null"); + } + if (dataObject == null) { + throw new InvalidParameterValueException("dataObject should not be null"); + } + if (host == null) { + throw new InvalidParameterValueException("host should not be null"); + } + + StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId()); + if (storagePool == null) { + logger.error("grantAccess: Storage Pool not found for id: " + dataStore.getId()); + throw new CloudRuntimeException("Storage Pool not found for id: " + dataStore.getId()); + } + String storagePoolUuid = dataStore.getUuid(); + + // ONTAP managed storage only supports cluster and zone scoped pools + if (storagePool.getScope() != ScopeType.CLUSTER && storagePool.getScope() != ScopeType.ZONE) { + logger.error("grantAccess: Only Cluster and Zone scoped primary storage is supported for storage Pool: " + storagePool.getName()); + throw new CloudRuntimeException("Only Cluster and Zone scoped primary storage is supported for Storage Pool: " + storagePool.getName()); + } + + if (dataObject.getType() == DataObjectType.VOLUME) { + VolumeVO volumeVO = volumeDao.findById(dataObject.getId()); + if (volumeVO == null) { + logger.error("grantAccess: CloudStack Volume not found for id: " + dataObject.getId()); + throw new CloudRuntimeException("CloudStack Volume not found for 
id: " + dataObject.getId()); + } + + Map details = storagePoolDetailsDao.listDetailsKeyPairs(storagePool.getId()); + String svmName = details.get(OntapStorageConstants.SVM_NAME); + + if (ProtocolType.ISCSI.name().equalsIgnoreCase(details.get(OntapStorageConstants.PROTOCOL))) { + // Only retrieve LUN name for iSCSI volumes + String cloudStackVolumeName = volumeDetailsDao.findDetail(volumeVO.getId(), OntapStorageConstants.LUN_DOT_NAME).getValue(); + UnifiedSANStrategy sanStrategy = (UnifiedSANStrategy) OntapStorageUtils.getStrategyByStoragePoolDetails(details); + String accessGroupName = OntapStorageUtils.getIgroupName(svmName, host.getName()); + + // Validate if Igroup exist ONTAP for this host as we may be using delete_on_unmap= true and igroup may be deleted by ONTAP automatically + Map getAccessGroupMap = Map.of( + OntapStorageConstants.NAME, accessGroupName, + OntapStorageConstants.SVM_DOT_NAME, svmName + ); + Igroup igroup = new Igroup(); + AccessGroup accessGroup = sanStrategy.getAccessGroup(getAccessGroupMap); + if(accessGroup == null || accessGroup.getIgroup() == null) { + logger.info("grantAccess: Igroup {} does not exist for the host {} : Need to create Igroup for the host ", accessGroupName, host.getName()); + // create the igroup for the host and perform lun-mapping + accessGroup = new AccessGroup(); + List hosts = new ArrayList<>(); + hosts.add((HostVO) host); + accessGroup.setHostsToConnect(hosts); + accessGroup.setStoragePoolId(storagePool.getId()); + accessGroup = sanStrategy.createAccessGroup(accessGroup); + }else{ + logger.info("grantAccess: Igroup {} already exist for the host {}: ", accessGroup.getIgroup().getName() ,host.getName()); + igroup = accessGroup.getIgroup(); + /* TODO Below cases will be covered later, for now they will be a pre-requisite on customer side + 1. Igroup exist with the same name but host initiator has been rempved + 2. 
Igroup exist with the same name but host initiator has been changed may be due to new NIC or new adapter + In both cases we need to verify current host initiator is registered in the igroup before allowing access + Incase it is not , add it and proceed for lun-mapping + */ + } + logger.info("grantAccess: Igroup {} is present now with initiators {} ", accessGroup.getIgroup().getName(), accessGroup.getIgroup().getInitiators()); + // Create or retrieve existing LUN mapping + String lunNumber = sanStrategy.ensureLunMapped(svmName, cloudStackVolumeName, accessGroupName); + + // Update volume path if changed (e.g., after migration or re-mapping) + String iscsiPath = OntapStorageConstants.SLASH + storagePool.getPath() + OntapStorageConstants.SLASH + lunNumber; + if (volumeVO.getPath() == null || !volumeVO.getPath().equals(iscsiPath)) { + volumeVO.set_iScsiName(iscsiPath); + volumeVO.setPath(iscsiPath); + } + } else if (ProtocolType.NFS3.name().equalsIgnoreCase(details.get(OntapStorageConstants.PROTOCOL))) { + // For NFS, no access grant needed - file is accessible via mount + logger.debug("grantAccess: NFS volume [{}], no igroup mapping required", volumeVO.getUuid()); + return true; + } + volumeVO.setPoolType(storagePool.getPoolType()); + volumeVO.setPoolId(storagePool.getId()); + volumeDao.update(volumeVO.getId(), volumeVO); + } else { + logger.error("Invalid DataObjectType (" + dataObject.getType() + ") passed to grantAccess"); + throw new CloudRuntimeException("Invalid DataObjectType (" + dataObject.getType() + ") passed to grantAccess"); + } + return true; + } catch (Exception e) { + logger.error("grantAccess: Failed for dataObject [{}]: {}", dataObject, e.getMessage()); + throw new CloudRuntimeException("Failed with error: " + e.getMessage(), e); + } } + /** + * Revokes a host's access to a volume. 
+ */ @Override public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) { - throw new UnsupportedOperationException("Revoke access operation is not supported for ONTAP primary storage."); + try { + if (dataStore == null) { + throw new InvalidParameterValueException("dataStore should not be null"); + } + if (dataObject == null) { + throw new InvalidParameterValueException("dataObject should not be null"); + } + if (host == null) { + throw new InvalidParameterValueException("host should not be null"); + } + + StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId()); + if (storagePool == null) { + logger.error("revokeAccess: Storage Pool not found for id: " + dataStore.getId()); + throw new CloudRuntimeException("Storage Pool not found for id: " + dataStore.getId()); + } + + if (storagePool.getScope() != ScopeType.CLUSTER && storagePool.getScope() != ScopeType.ZONE) { + logger.error("revokeAccess: Only Cluster and Zone scoped primary storage is supported for storage Pool: " + storagePool.getName()); + throw new CloudRuntimeException("Only Cluster and Zone scoped primary storage is supported for Storage Pool: " + storagePool.getName()); + } + + if (dataObject.getType() == DataObjectType.VOLUME) { + VolumeVO volumeVO = volumeDao.findById(dataObject.getId()); + if (volumeVO == null) { + logger.error("revokeAccess: CloudStack Volume not found for id: " + dataObject.getId()); + throw new CloudRuntimeException("CloudStack Volume not found for id: " + dataObject.getId()); + } + revokeAccessForVolume(storagePool, volumeVO, host); + } else { + logger.error("revokeAccess: Invalid DataObjectType (" + dataObject.getType() + ") passed to revokeAccess"); + throw new CloudRuntimeException("Invalid DataObjectType (" + dataObject.getType() + ") passed to revokeAccess"); + } + } catch (Exception e) { + logger.error("revokeAccess: Failed for dataObject [{}]: {}", dataObject, e.getMessage()); + throw new CloudRuntimeException("Failed with error: " + 
e.getMessage(), e); + } + } + + /** + * Revokes volume access for the specified host. + */ + private void revokeAccessForVolume(StoragePoolVO storagePool, VolumeVO volumeVO, Host host) { + logger.info("revokeAccessForVolume: Revoking access to volume [{}] for host [{}]", volumeVO.getName(), host.getName()); + + Map details = storagePoolDetailsDao.listDetailsKeyPairs(storagePool.getId()); + StorageStrategy storageStrategy = OntapStorageUtils.getStrategyByStoragePoolDetails(details); + String svmName = details.get(OntapStorageConstants.SVM_NAME); + + if (ProtocolType.ISCSI.name().equalsIgnoreCase(details.get(OntapStorageConstants.PROTOCOL))) { + String accessGroupName = OntapStorageUtils.getIgroupName(svmName, host.getName()); + + // Retrieve LUN name from volume details; if missing, volume may not have been fully created + String lunName = volumeDetailsDao.findDetail(volumeVO.getId(), OntapStorageConstants.LUN_DOT_NAME) != null ? + volumeDetailsDao.findDetail(volumeVO.getId(), OntapStorageConstants.LUN_DOT_NAME).getValue() : null; + if (lunName == null) { + logger.warn("revokeAccessForVolume: No LUN name found for volume [{}]; skipping revoke", volumeVO.getId()); + return; + } + + // Verify LUN still exists on ONTAP (may have been manually deleted) + CloudStackVolume cloudStackVolume = getCloudStackVolumeByName(storageStrategy, svmName, lunName); + if (cloudStackVolume == null || cloudStackVolume.getLun() == null || cloudStackVolume.getLun().getUuid() == null) { + logger.warn("revokeAccessForVolume: LUN for volume [{}] not found on ONTAP, skipping revoke", volumeVO.getId()); + return; + } + + // Verify igroup still exists on ONTAP + AccessGroup accessGroup = getAccessGroupByName(storageStrategy, svmName, accessGroupName); + if (accessGroup == null || accessGroup.getIgroup() == null || accessGroup.getIgroup().getUuid() == null) { + logger.warn("revokeAccessForVolume: iGroup [{}] not found on ONTAP, skipping revoke", accessGroupName); + return; + } + + // Verify host 
initiator is in the igroup before attempting to remove mapping + SANStrategy sanStrategy = (UnifiedSANStrategy) storageStrategy; + if (!sanStrategy.validateInitiatorInAccessGroup(host.getStorageUrl(), svmName, accessGroup.getIgroup())) { + logger.warn("revokeAccessForVolume: Initiator [{}] is not in iGroup [{}], skipping revoke", + host.getStorageUrl(), accessGroupName); + return; + } + + // Remove the LUN mapping from the igroup + Map disableLogicalAccessMap = new HashMap<>(); + disableLogicalAccessMap.put(OntapStorageConstants.LUN_DOT_UUID, cloudStackVolume.getLun().getUuid()); + disableLogicalAccessMap.put(OntapStorageConstants.IGROUP_DOT_UUID, accessGroup.getIgroup().getUuid()); + storageStrategy.disableLogicalAccess(disableLogicalAccessMap); + + logger.info("revokeAccessForVolume: Successfully revoked access to LUN [{}] for host [{}]", + lunName, host.getName()); + } + } + + /** + * Retrieves a volume from ONTAP by name. + */ + private CloudStackVolume getCloudStackVolumeByName(StorageStrategy storageStrategy, String svmName, String cloudStackVolumeName) { + Map getCloudStackVolumeMap = new HashMap<>(); + getCloudStackVolumeMap.put(OntapStorageConstants.NAME, cloudStackVolumeName); + getCloudStackVolumeMap.put(OntapStorageConstants.SVM_DOT_NAME, svmName); + + CloudStackVolume cloudStackVolume = storageStrategy.getCloudStackVolume(getCloudStackVolumeMap); + if (cloudStackVolume == null || cloudStackVolume.getLun() == null || cloudStackVolume.getLun().getName() == null) { + logger.warn("getCloudStackVolumeByName: LUN [{}] not found on ONTAP", cloudStackVolumeName); + return null; + } + return cloudStackVolume; + } + + /** + * Retrieves an access group from ONTAP by name. 
+ */ + private AccessGroup getAccessGroupByName(StorageStrategy storageStrategy, String svmName, String accessGroupName) { + Map getAccessGroupMap = new HashMap<>(); + getAccessGroupMap.put(OntapStorageConstants.NAME, accessGroupName); + getAccessGroupMap.put(OntapStorageConstants.SVM_DOT_NAME, svmName); + + AccessGroup accessGroup = storageStrategy.getAccessGroup(getAccessGroupMap); + if (accessGroup == null || accessGroup.getIgroup() == null || accessGroup.getIgroup().getName() == null) { + logger.warn("getAccessGroupByName: iGroup [{}] not found on ONTAP", accessGroupName); + return null; + } + return accessGroup; } @Override @@ -128,11 +612,268 @@ public long getUsedIops(StoragePool storagePool) { return 0; } + /** + * Takes a snapshot by creating an ONTAP FlexVolume-level snapshot. + * + *

This method creates a point-in-time, space-efficient snapshot of the entire + * FlexVolume containing the CloudStack volume. FlexVolume snapshots are atomic + * and capture all files/LUNs within the volume at the moment of creation.

+ * + *

Both NFS and iSCSI protocols use the same FlexVolume snapshot approach: + *

     * <ul>
     *   <li>NFS: the QCOW2 file is captured within the FlexVolume snapshot</li>
     *   <li>iSCSI: the LUN is captured within the FlexVolume snapshot</li>
     * </ul>
     *

+ * + *

With {@code STORAGE_SYSTEM_SNAPSHOT=true}, {@code StorageSystemSnapshotStrategy} + * handles the workflow.

+ */ @Override - public void takeSnapshot(SnapshotInfo snapshot, AsyncCompletionCallback callback) {} + public void takeSnapshot(SnapshotInfo snapshot, AsyncCompletionCallback callback) { + logger.info("OntapPrimaryDatastoreDriver.takeSnapshot: Creating FlexVolume snapshot for snapshot [{}]", snapshot.getId()); + CreateCmdResult result; + + try { + VolumeInfo volumeInfo = snapshot.getBaseVolume(); + + VolumeVO volumeVO = volumeDao.findById(volumeInfo.getId()); + if (volumeVO == null) { + throw new CloudRuntimeException("VolumeVO not found for id: " + volumeInfo.getId()); + } + + StoragePoolVO storagePool = storagePoolDao.findById(volumeVO.getPoolId()); + if (storagePool == null) { + logger.error("takeSnapshot: Storage Pool not found for id: {}", volumeVO.getPoolId()); + throw new CloudRuntimeException("Storage Pool not found for id: " + volumeVO.getPoolId()); + } + + Map poolDetails = storagePoolDetailsDao.listDetailsKeyPairs(volumeVO.getPoolId()); + String protocol = poolDetails.get(OntapStorageConstants.PROTOCOL); + String flexVolUuid = poolDetails.get(OntapStorageConstants.VOLUME_UUID); + if (flexVolUuid == null || flexVolUuid.isEmpty()) { + throw new CloudRuntimeException("FlexVolume UUID not found in pool details for pool " + volumeVO.getPoolId()); + } + + StorageStrategy storageStrategy = OntapStorageUtils.getStrategyByStoragePoolDetails(poolDetails); + SnapshotFeignClient snapshotClient = storageStrategy.getSnapshotFeignClient(); + String authHeader = storageStrategy.getAuthHeader(); + + SnapshotObjectTO snapshotObjectTo = (SnapshotObjectTO) snapshot.getTO(); + + // Build snapshot name using volume name and snapshot UUID + String snapshotName = buildSnapshotName(volumeInfo.getName(), snapshot.getUuid()); + + // Resolve the volume path for storing in snapshot details (for revert operation) + String volumePath = resolveVolumePathOnOntap(volumeVO, protocol, poolDetails); + + // For iSCSI, retrieve LUN UUID for restore operations + String lunUuid = null; + if 
(ProtocolType.ISCSI.name().equalsIgnoreCase(protocol)) { + lunUuid = volumeDetailsDao.findDetail(volumeVO.getId(), OntapStorageConstants.LUN_DOT_UUID) != null + ? volumeDetailsDao.findDetail(volumeVO.getId(), OntapStorageConstants.LUN_DOT_UUID).getValue() + : null; + if (lunUuid == null) { + throw new CloudRuntimeException("LUN UUID not found for iSCSI volume " + volumeVO.getId()); + } + } + + // Create FlexVolume snapshot via ONTAP REST API + FlexVolSnapshot snapshotRequest = new FlexVolSnapshot(snapshotName, + "CloudStack volume snapshot for volume " + volumeInfo.getName()); + + logger.info("takeSnapshot: Creating ONTAP FlexVolume snapshot [{}] on FlexVol UUID [{}] for volume [{}]", + snapshotName, flexVolUuid, volumeVO.getId()); + + JobResponse jobResponse = snapshotClient.createSnapshot(authHeader, flexVolUuid, snapshotRequest); + if (jobResponse == null || jobResponse.getJob() == null) { + throw new CloudRuntimeException("Failed to initiate FlexVolume snapshot on FlexVol UUID [" + flexVolUuid + "]"); + } + + // Poll for job completion + Boolean jobSucceeded = storageStrategy.jobPollForSuccess(jobResponse.getJob().getUuid(), 30, 2); + if (!jobSucceeded) { + throw new CloudRuntimeException("FlexVolume snapshot job failed on FlexVol UUID [" + flexVolUuid + "]"); + } + + // Retrieve the created snapshot UUID by name + String ontapSnapshotUuid = resolveSnapshotUuid(snapshotClient, authHeader, flexVolUuid, snapshotName); + if (ontapSnapshotUuid == null || ontapSnapshotUuid.isEmpty()) { + throw new CloudRuntimeException("Failed to resolve snapshot UUID for snapshot name [" + snapshotName + "]"); + } + + // Set snapshot path for CloudStack (format: snapshotName for identification) + snapshotObjectTo.setPath(OntapStorageConstants.ONTAP_SNAP_ID + "=" + ontapSnapshotUuid); + + // Persist snapshot details for revert/delete operations + updateSnapshotDetails(snapshot.getId(), volumeInfo.getId(), flexVolUuid, + ontapSnapshotUuid, snapshotName, volumePath, 
volumeVO.getPoolId(), protocol, lunUuid); + + CreateObjectAnswer createObjectAnswer = new CreateObjectAnswer(snapshotObjectTo); + result = new CreateCmdResult(null, createObjectAnswer); + result.setResult(null); + + logger.info("takeSnapshot: Successfully created FlexVolume snapshot [{}] (uuid={}) for volume [{}]", + snapshotName, ontapSnapshotUuid, volumeVO.getId()); + + } catch (Exception ex) { + logger.error("takeSnapshot: Failed due to ", ex); + result = new CreateCmdResult(null, new CreateObjectAnswer(ex.toString())); + result.setResult(ex.toString()); + } + + callback.complete(result); + } + + /** + * Resolves the volume path on ONTAP for snapshot restore operations. + * + * @param volumeVO The CloudStack volume + * @param protocol Storage protocol (NFS3 or ISCSI) + * @param poolDetails Pool configuration details + * @return The ONTAP path (file path for NFS, LUN name for iSCSI) + */ + private String resolveVolumePathOnOntap(VolumeVO volumeVO, String protocol, Map poolDetails) { + if (ProtocolType.NFS3.name().equalsIgnoreCase(protocol)) { + // For NFS, use the volume's file path + return volumeVO.getPath(); + } else if (ProtocolType.ISCSI.name().equalsIgnoreCase(protocol)) { + // For iSCSI, retrieve the LUN name from volume details + String lunName = volumeDetailsDao.findDetail(volumeVO.getId(), OntapStorageConstants.LUN_DOT_NAME) != null ? + volumeDetailsDao.findDetail(volumeVO.getId(), OntapStorageConstants.LUN_DOT_NAME).getValue() : null; + if (lunName == null) { + throw new CloudRuntimeException("No LUN name found for volume " + volumeVO.getId()); + } + return lunName; + } + throw new CloudRuntimeException("Unsupported protocol " + protocol); + } + + /** + * Resolves the ONTAP snapshot UUID by querying for the snapshot by name. 
+ * + * @param snapshotClient The ONTAP snapshot Feign client + * @param authHeader Authorization header + * @param flexVolUuid FlexVolume UUID + * @param snapshotName Name of the snapshot to find + * @return The UUID of the snapshot, or null if not found + */ + private String resolveSnapshotUuid(SnapshotFeignClient snapshotClient, String authHeader, + String flexVolUuid, String snapshotName) { + Map queryParams = new HashMap<>(); + queryParams.put("name", snapshotName); + queryParams.put("fields", "uuid,name"); + + OntapResponse response = snapshotClient.getSnapshots(authHeader, flexVolUuid, queryParams); + if (response != null && response.getRecords() != null && !response.getRecords().isEmpty()) { + return response.getRecords().get(0).getUuid(); + } + return null; + } + + /** + * Reverts a volume to a snapshot using protocol-specific ONTAP restore APIs. + * + *

This method delegates to the appropriate StorageStrategy to restore the + * specific file (NFS) or LUN (iSCSI) from the FlexVolume snapshot directly + * via ONTAP REST API, without involving the hypervisor agent.

+ * + *

Protocol-specific handling (delegated to strategy classes):

+ *
     * <ul>
     *   <li>NFS (UnifiedNASStrategy): uses the single-file restore API
     *       {@code POST /api/storage/volumes/{volume_uuid}/snapshots/{snapshot_uuid}/files/{file_path}/restore}
     *       to restore the QCOW2 file from the FlexVolume snapshot to its original location.</li>
     *   <li>iSCSI (UnifiedSANStrategy): uses the LUN restore API
     *       {@code POST /api/storage/luns/{lun.uuid}/restore}
     *       to restore the LUN data from the snapshot to the specified destination path.</li>
     * </ul>
+ */ @Override - public void revertSnapshot(SnapshotInfo snapshotOnImageStore, SnapshotInfo snapshotOnPrimaryStore, AsyncCompletionCallback callback) {} + public void revertSnapshot(SnapshotInfo snapshotOnImageStore, SnapshotInfo snapshotOnPrimaryStore, + AsyncCompletionCallback callback) { + logger.info("OntapPrimaryDatastoreDriver.revertSnapshot: Reverting snapshot [{}]", + snapshotOnImageStore.getId()); + + CommandResult result = new CommandResult(); + + try { + // Use the snapshot that has the ONTAP details stored + SnapshotInfo snapshot = snapshotOnPrimaryStore != null ? snapshotOnPrimaryStore : snapshotOnImageStore; + long snapshotId = snapshot.getId(); + + // Retrieve snapshot details stored during takeSnapshot + String flexVolUuid = getSnapshotDetail(snapshotId, OntapStorageConstants.BASE_ONTAP_FV_ID); + String ontapSnapshotUuid = getSnapshotDetail(snapshotId, OntapStorageConstants.ONTAP_SNAP_ID); + String snapshotName = getSnapshotDetail(snapshotId, OntapStorageConstants.ONTAP_SNAP_NAME); + String volumePath = getSnapshotDetail(snapshotId, OntapStorageConstants.VOLUME_PATH); + String poolIdStr = getSnapshotDetail(snapshotId, OntapStorageConstants.PRIMARY_POOL_ID); + String protocol = getSnapshotDetail(snapshotId, OntapStorageConstants.PROTOCOL); + + if (flexVolUuid == null || snapshotName == null || volumePath == null || poolIdStr == null) { + throw new CloudRuntimeException("Missing required snapshot details for snapshot " + snapshotId + + " (flexVolUuid=" + flexVolUuid + ", snapshotName=" + snapshotName + + ", volumePath=" + volumePath + ", poolId=" + poolIdStr + ")"); + } + + long poolId = Long.parseLong(poolIdStr); + Map poolDetails = storagePoolDetailsDao.listDetailsKeyPairs(poolId); + + StorageStrategy storageStrategy = OntapStorageUtils.getStrategyByStoragePoolDetails(poolDetails); + + // Get the FlexVolume name (required for CLI-based restore API for all protocols) + String flexVolName = poolDetails.get(OntapStorageConstants.VOLUME_NAME); + if 
(flexVolName == null || flexVolName.isEmpty()) { + throw new CloudRuntimeException("FlexVolume name not found in pool details for pool " + poolId); + } + + // Prepare protocol-specific parameters (lunUuid is only needed for backward compatibility) + String lunUuid = null; + if (ProtocolType.ISCSI.name().equalsIgnoreCase(protocol)) { + lunUuid = getSnapshotDetail(snapshotId, OntapStorageConstants.LUN_DOT_UUID); + } + + // Delegate to strategy class for protocol-specific restore + JobResponse jobResponse = storageStrategy.revertSnapshotForCloudStackVolume( + snapshotName, flexVolUuid, ontapSnapshotUuid, volumePath, lunUuid, flexVolName); + + if (jobResponse == null || jobResponse.getJob() == null) { + throw new CloudRuntimeException("Failed to initiate restore from snapshot [" + + snapshotName + "]"); + } + + // Poll for job completion (use longer timeout for large LUNs/files) + Boolean jobSucceeded = storageStrategy.jobPollForSuccess(jobResponse.getJob().getUuid(), 60, 2); + if (!jobSucceeded) { + throw new CloudRuntimeException("Restore job failed for snapshot [" + + snapshotName + "]"); + } + + logger.info("revertSnapshot: Successfully restored {} [{}] from snapshot [{}]", + ProtocolType.ISCSI.name().equalsIgnoreCase(protocol) ? "LUN" : "file", + volumePath, snapshotName); + + result.setResult(null); // Success + + } catch (Exception ex) { + logger.error("revertSnapshot: Failed to revert snapshot {}", snapshotOnImageStore, ex); + result.setResult(ex.toString()); + } + + callback.complete(result); + } + + /** + * Retrieves a snapshot detail value by key. + * + * @param snapshotId The CloudStack snapshot ID + * @param key The detail key + * @return The detail value, or null if not found + */ + private String getSnapshotDetail(long snapshotId, String key) { + SnapshotDetailsVO detail = snapshotDetailsDao.findDetail(snapshotId, key); + return detail != null ? 
detail.getValue() : null; + } @Override public void handleQualityOfServiceForVolumeMigration(VolumeInfo volumeInfo, QualityOfServiceState qualityOfServiceState) {} @@ -149,7 +890,7 @@ public Pair getStorageStats(StoragePool storagePool) { @Override public boolean canProvideVolumeStats() { - return true; + return false; // Not yet implemented for RAW managed NFS } @Override @@ -184,5 +925,111 @@ public boolean isStorageSupportHA(Storage.StoragePoolType type) { } @Override - public void detachVolumeFromAllStorageNodes(Volume volume) {} + public void detachVolumeFromAllStorageNodes(Volume volume) { + } + + private CloudStackVolume createDeleteCloudStackVolumeRequest(StoragePool storagePool, Map details, VolumeInfo volumeInfo) { + CloudStackVolume cloudStackVolumeDeleteRequest = null; + + String protocol = details.get(OntapStorageConstants.PROTOCOL); + ProtocolType protocolType = ProtocolType.valueOf(protocol); + switch (protocolType) { + case NFS3: + cloudStackVolumeDeleteRequest = new CloudStackVolume(); + cloudStackVolumeDeleteRequest.setDatastoreId(String.valueOf(storagePool.getId())); + cloudStackVolumeDeleteRequest.setVolumeInfo(volumeInfo); + break; + case ISCSI: + // Retrieve LUN identifiers stored during volume creation + String lunName = volumeDetailsDao.findDetail(volumeInfo.getId(), OntapStorageConstants.LUN_DOT_NAME).getValue(); + String lunUUID = volumeDetailsDao.findDetail(volumeInfo.getId(), OntapStorageConstants.LUN_DOT_UUID).getValue(); + if (lunName == null) { + throw new CloudRuntimeException("Missing LUN name for volume " + volumeInfo.getId()); + } + cloudStackVolumeDeleteRequest = new CloudStackVolume(); + Lun lun = new Lun(); + lun.setName(lunName); + lun.setUuid(lunUUID); + cloudStackVolumeDeleteRequest.setLun(lun); + break; + default: + throw new CloudRuntimeException("Unsupported protocol " + protocol); + + } + return cloudStackVolumeDeleteRequest; + + } + + // ────────────────────────────────────────────────────────────────────────── + // 
Snapshot Helper Methods + // ────────────────────────────────────────────────────────────────────────── + + /** + * Builds a snapshot name with proper length constraints. + * Format: {@code -} + */ + private String buildSnapshotName(String volumeName, String snapshotUuid) { + String name = volumeName + "-" + snapshotUuid; + int maxLength = OntapStorageConstants.MAX_SNAPSHOT_NAME_LENGTH; + int trimRequired = name.length() - maxLength; + + if (trimRequired > 0) { + name = StringUtils.left(volumeName, volumeName.length() - trimRequired) + "-" + snapshotUuid; + } + return name; + } + + /** + * Persists snapshot metadata in snapshot_details table. + * + * @param csSnapshotId CloudStack snapshot ID + * @param csVolumeId Source CloudStack volume ID + * @param flexVolUuid ONTAP FlexVolume UUID + * @param ontapSnapshotUuid ONTAP FlexVolume snapshot UUID + * @param snapshotName ONTAP snapshot name + * @param volumePath Path of the volume file/LUN within the FlexVolume (for restore) + * @param storagePoolId Primary storage pool ID + * @param protocol Storage protocol (NFS3 or ISCSI) + * @param lunUuid LUN UUID (only for iSCSI, null for NFS) + */ + private void updateSnapshotDetails(long csSnapshotId, long csVolumeId, String flexVolUuid, + String ontapSnapshotUuid, String snapshotName, + String volumePath, long storagePoolId, String protocol, + String lunUuid) { + SnapshotDetailsVO snapshotDetail = new SnapshotDetailsVO(csSnapshotId, + OntapStorageConstants.SRC_CS_VOLUME_ID, String.valueOf(csVolumeId), false); + snapshotDetailsDao.persist(snapshotDetail); + + snapshotDetail = new SnapshotDetailsVO(csSnapshotId, + OntapStorageConstants.BASE_ONTAP_FV_ID, flexVolUuid, false); + snapshotDetailsDao.persist(snapshotDetail); + + snapshotDetail = new SnapshotDetailsVO(csSnapshotId, + OntapStorageConstants.ONTAP_SNAP_ID, ontapSnapshotUuid, false); + snapshotDetailsDao.persist(snapshotDetail); + + snapshotDetail = new SnapshotDetailsVO(csSnapshotId, + 
OntapStorageConstants.ONTAP_SNAP_NAME, snapshotName, false); + snapshotDetailsDao.persist(snapshotDetail); + + snapshotDetail = new SnapshotDetailsVO(csSnapshotId, + OntapStorageConstants.VOLUME_PATH, volumePath, false); + snapshotDetailsDao.persist(snapshotDetail); + + snapshotDetail = new SnapshotDetailsVO(csSnapshotId, + OntapStorageConstants.PRIMARY_POOL_ID, String.valueOf(storagePoolId), false); + snapshotDetailsDao.persist(snapshotDetail); + + snapshotDetail = new SnapshotDetailsVO(csSnapshotId, + OntapStorageConstants.PROTOCOL, protocol, false); + snapshotDetailsDao.persist(snapshotDetail); + + // Store LUN UUID for iSCSI volumes (required for LUN restore API) + if (lunUuid != null && !lunUuid.isEmpty()) { + snapshotDetail = new SnapshotDetailsVO(csSnapshotId, + OntapStorageConstants.LUN_DOT_UUID, lunUuid, false); + snapshotDetailsDao.persist(snapshotDetail); + } + } + } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NASFeignClient.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NASFeignClient.java index f48f83dc28de..8cf21b94b2f1 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NASFeignClient.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NASFeignClient.java @@ -32,7 +32,7 @@ public interface NASFeignClient { // File Operations - @RequestLine("GET /api/storage/volumes/{volumeUuid}/files/{path}") + @RequestLine("GET /api/storage/volumes/{volumeUuid}/files/{path}?return_metadata=true") @Headers({"Authorization: {authHeader}"}) OntapResponse getFileResponse(@Param("authHeader") String authHeader, @Param("volumeUuid") String volumeUUID, diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/SANFeignClient.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/SANFeignClient.java index 
5cbba9d683d2..7281dc2ecbeb 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/SANFeignClient.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/SANFeignClient.java @@ -23,6 +23,8 @@ import org.apache.cloudstack.storage.feign.model.IscsiService; import org.apache.cloudstack.storage.feign.model.Lun; import org.apache.cloudstack.storage.feign.model.LunMap; +import org.apache.cloudstack.storage.feign.model.LunRestoreRequest; +import org.apache.cloudstack.storage.feign.model.response.JobResponse; import org.apache.cloudstack.storage.feign.model.response.OntapResponse; import feign.Headers; import feign.Param; @@ -88,4 +90,24 @@ public interface SANFeignClient { void deleteLunMap(@Param("authHeader") String authHeader, @Param("lunUuid") String lunUUID, @Param("igroupUuid") String igroupUUID); + + // LUN Restore API + /** + * Restores a LUN from a FlexVolume snapshot. + * + *

ONTAP REST: {@code POST /api/storage/luns/{lun.uuid}/restore}

+ * + *

This API restores the LUN data from a specified snapshot to a destination path. + * The LUN must exist and the snapshot must contain the LUN data.

+ * + * @param authHeader Basic auth header + * @param lunUuid UUID of the LUN to restore + * @param request Request body with snapshot name and destination path + * @return JobResponse containing the async job reference + */ + @RequestLine("POST /api/storage/luns/{lunUuid}/restore") + @Headers({"Authorization: {authHeader}", "Content-Type: application/json"}) + JobResponse restoreLun(@Param("authHeader") String authHeader, + @Param("lunUuid") String lunUuid, + LunRestoreRequest request); } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/SnapshotFeignClient.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/SnapshotFeignClient.java new file mode 100644 index 000000000000..2f0e050d6f55 --- /dev/null +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/SnapshotFeignClient.java @@ -0,0 +1,184 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.storage.feign.client; + +import feign.Headers; +import feign.Param; +import feign.QueryMap; +import feign.RequestLine; +import org.apache.cloudstack.storage.feign.model.CliSnapshotRestoreRequest; +import org.apache.cloudstack.storage.feign.model.FlexVolSnapshot; +import org.apache.cloudstack.storage.feign.model.SnapshotFileRestoreRequest; +import org.apache.cloudstack.storage.feign.model.response.JobResponse; +import org.apache.cloudstack.storage.feign.model.response.OntapResponse; + +import java.util.Map; + +/** + * Feign client for ONTAP FlexVolume snapshot operations. + * + *

Maps to the ONTAP REST API endpoint: + * {@code /api/storage/volumes/{volume_uuid}/snapshots}

+ * + *

FlexVolume snapshots are point-in-time, space-efficient copies of an entire + * FlexVolume. Unlike file-level clones, a single FlexVolume snapshot atomically + * captures all files/LUNs within the volume, making it ideal for VM-level + * snapshots when multiple CloudStack disks reside on the same FlexVolume.

+ */ +public interface SnapshotFeignClient { + + /** + * Creates a new snapshot for the specified FlexVolume. + * + *

ONTAP REST: {@code POST /api/storage/volumes/{volume_uuid}/snapshots}

+ * + * @param authHeader Basic auth header + * @param volumeUuid UUID of the ONTAP FlexVolume + * @param snapshot Snapshot request body (at minimum, the {@code name} field) + * @return JobResponse containing the async job reference + */ + @RequestLine("POST /api/storage/volumes/{volumeUuid}/snapshots") + @Headers({"Authorization: {authHeader}", "Content-Type: application/json"}) + JobResponse createSnapshot(@Param("authHeader") String authHeader, + @Param("volumeUuid") String volumeUuid, + FlexVolSnapshot snapshot); + + /** + * Lists snapshots for the specified FlexVolume. + * + *

ONTAP REST: {@code GET /api/storage/volumes/{volume_uuid}/snapshots}

+ * + * @param authHeader Basic auth header + * @param volumeUuid UUID of the ONTAP FlexVolume + * @param queryParams Optional query parameters (e.g., {@code name}, {@code fields}) + * @return Paginated response of FlexVolSnapshot records + */ + @RequestLine("GET /api/storage/volumes/{volumeUuid}/snapshots") + @Headers({"Authorization: {authHeader}"}) + OntapResponse getSnapshots(@Param("authHeader") String authHeader, + @Param("volumeUuid") String volumeUuid, + @QueryMap Map queryParams); + + /** + * Retrieves a specific snapshot by UUID. + * + *

ONTAP REST: {@code GET /api/storage/volumes/{volume_uuid}/snapshots/{uuid}}

+ * + * @param authHeader Basic auth header + * @param volumeUuid UUID of the ONTAP FlexVolume + * @param snapshotUuid UUID of the snapshot + * @return The FlexVolSnapshot object + */ + @RequestLine("GET /api/storage/volumes/{volumeUuid}/snapshots/{snapshotUuid}") + @Headers({"Authorization: {authHeader}"}) + FlexVolSnapshot getSnapshotByUuid(@Param("authHeader") String authHeader, + @Param("volumeUuid") String volumeUuid, + @Param("snapshotUuid") String snapshotUuid); + + /** + * Deletes a specific snapshot. + * + *

ONTAP REST: {@code DELETE /api/storage/volumes/{volume_uuid}/snapshots/{uuid}}

+ * + * @param authHeader Basic auth header + * @param volumeUuid UUID of the ONTAP FlexVolume + * @param snapshotUuid UUID of the snapshot to delete + * @return JobResponse containing the async job reference + */ + @RequestLine("DELETE /api/storage/volumes/{volumeUuid}/snapshots/{snapshotUuid}") + @Headers({"Authorization: {authHeader}"}) + JobResponse deleteSnapshot(@Param("authHeader") String authHeader, + @Param("volumeUuid") String volumeUuid, + @Param("snapshotUuid") String snapshotUuid); + + /** + * Restores a volume to a specific snapshot. + * + *

ONTAP REST: {@code PATCH /api/storage/volumes/{volume_uuid}/snapshots/{uuid}} + * with body {@code {"restore": true}} triggers a snapshot restore operation.

+ * + *

Note: This is a destructive operation — all data written after the + * snapshot was taken will be lost.

+ * + * @param authHeader Basic auth header + * @param volumeUuid UUID of the ONTAP FlexVolume + * @param snapshotUuid UUID of the snapshot to restore to + * @param body Request body, typically {@code {"restore": true}} + * @return JobResponse containing the async job reference + */ + @RequestLine("PATCH /api/storage/volumes/{volumeUuid}/snapshots/{snapshotUuid}?restore_to_snapshot=true") + @Headers({"Authorization: {authHeader}", "Content-Type: application/json"}) + JobResponse restoreSnapshot(@Param("authHeader") String authHeader, + @Param("volumeUuid") String volumeUuid, + @Param("snapshotUuid") String snapshotUuid); + + /** + * Restores a single file or LUN from a FlexVolume snapshot. + * + *

ONTAP REST: + * {@code POST /api/storage/volumes/{volume_uuid}/snapshots/{snapshot_uuid}/files/{file_path}/restore}

+ * + *

This restores only the specified file/LUN from the snapshot to the + * given {@code destination_path}, without reverting the entire FlexVolume. + * Ideal when multiple VMs share the same FlexVolume.

+ * + * @param authHeader Basic auth header + * @param volumeUuid UUID of the ONTAP FlexVolume + * @param snapshotUuid UUID of the snapshot containing the file + * @param filePath path of the file within the snapshot (URL-encoded if needed) + * @param request request body with {@code destination_path} + * @return JobResponse containing the async job reference + */ + @RequestLine("POST /api/storage/volumes/{volumeUuid}/snapshots/{snapshotUuid}/files/{filePath}/restore") + @Headers({"Authorization: {authHeader}", "Content-Type: application/json"}) + JobResponse restoreFileFromSnapshot(@Param("authHeader") String authHeader, + @Param("volumeUuid") String volumeUuid, + @Param("snapshotUuid") String snapshotUuid, + @Param("filePath") String filePath, + SnapshotFileRestoreRequest request); + + /** + * Restores a single file or LUN from a FlexVolume snapshot using the CLI native API. + * + *

ONTAP REST (CLI passthrough): + * {@code POST /api/private/cli/volume/snapshot/restore-file}

+ * + *

This CLI-based API is more reliable and works for both NFS files and iSCSI LUNs. + * The request body contains all required parameters: vserver, volume, snapshot, and path.

+ * + *

Example payload: + *

+     * {
+     *   "vserver": "vs0",
+     *   "volume": "rajiv_ONTAP_SP1",
+     *   "snapshot": "DATA-3-428726fe-7440-4b41-8d47-3f654e5d9814",
+     *   "path": "/d266bb2c-d479-47ad-81c3-a070e8bb58c0"
+     * }
+     * 
+ *

+ * + * @param authHeader Basic auth header + * @param request CLI snapshot restore request containing vserver, volume, snapshot, and path + * @return JobResponse containing the async job reference (if applicable) + */ + @RequestLine("POST /api/private/cli/volume/snapshot/restore-file") + @Headers({"Authorization: {authHeader}", "Content-Type: application/json"}) + JobResponse restoreFileFromSnapshotCli(@Param("authHeader") String authHeader, + CliSnapshotRestoreRequest request); +} diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/CliSnapshotRestoreRequest.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/CliSnapshotRestoreRequest.java new file mode 100644 index 000000000000..be242523f534 --- /dev/null +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/CliSnapshotRestoreRequest.java @@ -0,0 +1,121 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.storage.feign.model; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * Request body for the ONTAP CLI-based Snapshot File Restore API. + * + *

ONTAP REST endpoint (CLI passthrough): + * {@code POST /api/private/cli/volume/snapshot/restore-file}

+ * + *

This API restores a single file or LUN from a FlexVolume snapshot to a + * specified destination path using the CLI native implementation. + * It works for both NFS files and iSCSI LUNs.

+ * + *

Example payload: + *

+ * {
+ *   "vserver": "vs0",
+ *   "volume": "rajiv_ONTAP_SP1",
+ *   "snapshot": "DATA-3-428726fe-7440-4b41-8d47-3f654e5d9814",
+ *   "path": "/d266bb2c-d479-47ad-81c3-a070e8bb58c0"
+ * }
+ * 
+ *

+ */ +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class CliSnapshotRestoreRequest { + + @JsonProperty("vserver") + private String vserver; + + @JsonProperty("volume") + private String volume; + + @JsonProperty("snapshot") + private String snapshot; + + @JsonProperty("path") + private String path; + + public CliSnapshotRestoreRequest() { + } + + /** + * Creates a CLI snapshot restore request. + * + * @param vserver The SVM (vserver) name + * @param volume The FlexVolume name + * @param snapshot The snapshot name + * @param path The file/LUN path to restore (e.g., "/uuid.qcow2" or "/lun_name") + */ + public CliSnapshotRestoreRequest(String vserver, String volume, String snapshot, String path) { + this.vserver = vserver; + this.volume = volume; + this.snapshot = snapshot; + this.path = path; + } + + public String getVserver() { + return vserver; + } + + public void setVserver(String vserver) { + this.vserver = vserver; + } + + public String getVolume() { + return volume; + } + + public void setVolume(String volume) { + this.volume = volume; + } + + public String getSnapshot() { + return snapshot; + } + + public void setSnapshot(String snapshot) { + this.snapshot = snapshot; + } + + public String getPath() { + return path; + } + + public void setPath(String path) { + this.path = path; + } + + @Override + public String toString() { + return "CliSnapshotRestoreRequest{" + + "vserver='" + vserver + '\'' + + ", volume='" + volume + '\'' + + ", snapshot='" + snapshot + '\'' + + ", path='" + path + '\'' + + '}'; + } +} diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/FileInfo.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/FileInfo.java index 181620268932..a5dd24a3a286 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/FileInfo.java +++ 
b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/FileInfo.java @@ -25,7 +25,6 @@ import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.annotation.JsonValue; -import java.time.OffsetDateTime; import java.util.Objects; /** @@ -36,8 +35,6 @@ public class FileInfo { @JsonProperty("bytes_used") private Long bytesUsed = null; - @JsonProperty("creation_time") - private OffsetDateTime creationTime = null; @JsonProperty("fill_enabled") private Boolean fillEnabled = null; @JsonProperty("is_empty") @@ -46,8 +43,6 @@ public class FileInfo { private Boolean isSnapshot = null; @JsonProperty("is_vm_aligned") private Boolean isVmAligned = null; - @JsonProperty("modified_time") - private OffsetDateTime modifiedTime = null; @JsonProperty("name") private String name = null; @JsonProperty("overwrite_enabled") @@ -110,10 +105,6 @@ public Long getBytesUsed() { return bytesUsed; } - public OffsetDateTime getCreationTime() { - return creationTime; - } - public FileInfo fillEnabled(Boolean fillEnabled) { this.fillEnabled = fillEnabled; return this; @@ -149,11 +140,6 @@ public Boolean isIsVmAligned() { return isVmAligned; } - - public OffsetDateTime getModifiedTime() { - return modifiedTime; - } - public FileInfo name(String name) { this.name = name; return this; @@ -266,12 +252,10 @@ public String toString() { StringBuilder sb = new StringBuilder(); sb.append("class FileInfo {\n"); sb.append(" bytesUsed: ").append(toIndentedString(bytesUsed)).append("\n"); - sb.append(" creationTime: ").append(toIndentedString(creationTime)).append("\n"); sb.append(" fillEnabled: ").append(toIndentedString(fillEnabled)).append("\n"); sb.append(" isEmpty: ").append(toIndentedString(isEmpty)).append("\n"); sb.append(" isSnapshot: ").append(toIndentedString(isSnapshot)).append("\n"); sb.append(" isVmAligned: ").append(toIndentedString(isVmAligned)).append("\n"); - sb.append(" modifiedTime: 
").append(toIndentedString(modifiedTime)).append("\n"); sb.append(" name: ").append(toIndentedString(name)).append("\n"); sb.append(" overwriteEnabled: ").append(toIndentedString(overwriteEnabled)).append("\n"); sb.append(" path: ").append(toIndentedString(path)).append("\n"); diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/FlexVolSnapshot.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/FlexVolSnapshot.java new file mode 100644 index 000000000000..af5d6f145520 --- /dev/null +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/FlexVolSnapshot.java @@ -0,0 +1,122 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.cloudstack.storage.feign.model; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * Model representing an ONTAP FlexVolume-level snapshot. + * + *

Maps to the ONTAP REST API resource at + * {@code /api/storage/volumes/{volume.uuid}/snapshots}.

+ * + *

For creation, only the {@code name} field is required in the POST body. + * ONTAP returns the full representation including {@code uuid}, {@code name}, + * and {@code create_time} on GET requests.

+ * + * @see + * ONTAP REST API - Volume Snapshots + */ +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class FlexVolSnapshot { + + @JsonProperty("uuid") + private String uuid; + + @JsonProperty("name") + private String name; + + @JsonProperty("create_time") + private String createTime; + + @JsonProperty("comment") + private String comment; + + /** Concise reference to the parent volume (returned in GET responses). */ + @JsonProperty("volume") + private VolumeConcise volume; + + public FlexVolSnapshot() { + // default constructor for Jackson + } + + public FlexVolSnapshot(String name) { + this.name = name; + } + + public FlexVolSnapshot(String name, String comment) { + this.name = name; + this.comment = comment; + } + + // ── Getters / Setters ──────────────────────────────────────────────────── + + public String getUuid() { + return uuid; + } + + public void setUuid(String uuid) { + this.uuid = uuid; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getCreateTime() { + return createTime; + } + + public void setCreateTime(String createTime) { + this.createTime = createTime; + } + + public String getComment() { + return comment; + } + + public void setComment(String comment) { + this.comment = comment; + } + + public VolumeConcise getVolume() { + return volume; + } + + public void setVolume(VolumeConcise volume) { + this.volume = volume; + } + + @Override + public String toString() { + return "FlexVolSnapshot{" + + "uuid='" + uuid + '\'' + + ", name='" + name + '\'' + + ", createTime='" + createTime + '\'' + + ", comment='" + comment + '\'' + + '}'; + } +} diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/LunRestoreRequest.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/LunRestoreRequest.java new file mode 100644 index 000000000000..c645e4a5a16f 
--- /dev/null +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/LunRestoreRequest.java @@ -0,0 +1,132 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.cloudstack.storage.feign.model; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * Request body for the ONTAP LUN Restore API. + * + *

ONTAP REST endpoint: + * {@code POST /api/storage/luns/{lun.uuid}/restore}

+ * + *

This API restores a LUN from a FlexVolume snapshot to a specified + * destination path. Unlike file restore, this is LUN-specific.

+ * + *

Example payload: + *

+ * {
+ *   "snapshot": {
+ *     "name": "snapshot_name"
+ *   },
+ *   "destination": {
+ *     "path": "/vol/volume_name/lun_name"
+ *   }
+ * }
+ * 
+ *

+ */ +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class LunRestoreRequest { + + @JsonProperty("snapshot") + private SnapshotRef snapshot; + + @JsonProperty("destination") + private Destination destination; + + public LunRestoreRequest() { + } + + public LunRestoreRequest(String snapshotName, String destinationPath) { + this.snapshot = new SnapshotRef(snapshotName); + this.destination = new Destination(destinationPath); + } + + public SnapshotRef getSnapshot() { + return snapshot; + } + + public void setSnapshot(SnapshotRef snapshot) { + this.snapshot = snapshot; + } + + public Destination getDestination() { + return destination; + } + + public void setDestination(Destination destination) { + this.destination = destination; + } + + /** + * Nested class for snapshot reference. + */ + @JsonIgnoreProperties(ignoreUnknown = true) + @JsonInclude(JsonInclude.Include.NON_NULL) + public static class SnapshotRef { + + @JsonProperty("name") + private String name; + + public SnapshotRef() { + } + + public SnapshotRef(String name) { + this.name = name; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + } + + /** + * Nested class for destination path. 
+ */ + @JsonIgnoreProperties(ignoreUnknown = true) + @JsonInclude(JsonInclude.Include.NON_NULL) + public static class Destination { + + @JsonProperty("path") + private String path; + + public Destination() { + } + + public Destination(String path) { + this.path = path; + } + + public String getPath() { + return path; + } + + public void setPath(String path) { + this.path = path; + } + } +} diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/OntapStorage.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/OntapStorage.java index 8b450331b50a..a42cd02912b3 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/OntapStorage.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/OntapStorage.java @@ -24,20 +24,18 @@ public class OntapStorage { private final String username; private final String password; - private final String managementLIF; + private final String storageIP; private final String svmName; private final Long size; private final ProtocolType protocolType; - private final Boolean isDisaggregated; - public OntapStorage(String username, String password, String managementLIF, String svmName, Long size, ProtocolType protocolType, Boolean isDisaggregated) { + public OntapStorage(String username, String password, String storageIP, String svmName, Long size, ProtocolType protocolType) { this.username = username; this.password = password; - this.managementLIF = managementLIF; + this.storageIP = storageIP; this.svmName = svmName; this.size = size; this.protocolType = protocolType; - this.isDisaggregated = isDisaggregated; } public String getUsername() { @@ -48,13 +46,9 @@ public String getPassword() { return password; } - public String getManagementLIF() { - return managementLIF; - } + public String getStorageIP() { return storageIP; } - public String getSvmName() { - return svmName; - } + public 
String getSvmName() { return svmName; } public Long getSize() { return size; @@ -63,8 +57,4 @@ public Long getSize() { public ProtocolType getProtocol() { return protocolType; } - - public Boolean getIsDisaggregated() { - return isDisaggregated; - } } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/SnapshotFileRestoreRequest.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/SnapshotFileRestoreRequest.java new file mode 100644 index 000000000000..1f02e0c07470 --- /dev/null +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/SnapshotFileRestoreRequest.java @@ -0,0 +1,55 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.cloudstack.storage.feign.model; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * Request body for the ONTAP Snapshot File Restore API. + * + *

ONTAP REST endpoint: + * {@code POST /api/storage/volumes/{volume.uuid}/snapshots/{snapshot.uuid}/files/{file.path}/restore}

+ * + *

This API restores a single file or LUN from a FlexVolume snapshot to a + * specified destination path, without reverting the entire FlexVolume.

+ */ +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class SnapshotFileRestoreRequest { + + @JsonProperty("destination_path") + private String destinationPath; + + public SnapshotFileRestoreRequest() { + } + + public SnapshotFileRestoreRequest(String destinationPath) { + this.destinationPath = destinationPath; + } + + public String getDestinationPath() { + return destinationPath; + } + + public void setDestinationPath(String destinationPath) { + this.destinationPath = destinationPath; + } +} diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/VolumeConcise.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/VolumeConcise.java new file mode 100644 index 000000000000..602c9ff73658 --- /dev/null +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/VolumeConcise.java @@ -0,0 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.cloudstack.storage.feign.model; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class VolumeConcise { + @JsonProperty("uuid") + private String uuid; + @JsonProperty("name") + private String name; + public String getUuid() { + return uuid; + } + public void setUuid(String uuid) { + this.uuid = uuid; + } + public String getName() { + return name; + } + public void setName(String name) { this.name = name; } +} diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java index 7a66c0a72fe2..baf19f71b759 100755 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java @@ -31,7 +31,6 @@ import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePool; import com.cloud.storage.StoragePoolAutomation; -import com.cloud.utils.StringUtils; import com.cloud.utils.exception.CloudRuntimeException; import com.google.common.base.Preconditions; import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; @@ -55,12 +54,12 @@ import org.apache.cloudstack.storage.utils.OntapStorageConstants; import org.apache.cloudstack.storage.utils.OntapStorageUtils; import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper; +import org.apache.commons.lang3.StringUtils; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import javax.inject.Inject; import java.util.ArrayList; -import java.util.HashSet; 
import java.util.List; import java.util.Map; import java.util.Set; @@ -79,12 +78,16 @@ public class OntapPrimaryDatastoreLifecycle extends BasePrimaryDataStoreLifeCycl private static final long ONTAP_MIN_VOLUME_SIZE_IN_BYTES = 1677721600L; + /** + * Creates primary storage on NetApp storage + * @param dsInfos datastore information map + * @return DataStore instance + */ @Override public DataStore initialize(Map dsInfos) { if (dsInfos == null) { throw new CloudRuntimeException("Datastore info map is null, cannot create primary storage"); } - String url = (String) dsInfos.get("url"); Long zoneId = (Long) dsInfos.get("zoneId"); Long podId = (Long) dsInfos.get("podId"); Long clusterId = (Long) dsInfos.get("clusterId"); @@ -99,10 +102,11 @@ public DataStore initialize(Map dsInfos) { ", zoneId: " + zoneId + ", podId: " + podId + ", clusterId: " + clusterId); logger.debug("Received capacityBytes from UI: " + capacityBytes); + // Additional details requested for ONTAP primary storage pool creation @SuppressWarnings("unchecked") Map details = (Map) dsInfos.get("details"); - capacityBytes = validateInitializeInputs(capacityBytes, podId, clusterId, zoneId, storagePoolName, providerName, managed, url, details); + capacityBytes = validateInitializeInputs(capacityBytes, podId, clusterId, zoneId, storagePoolName, providerName, managed, details); PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters(); if (clusterId != null) { @@ -115,23 +119,21 @@ public DataStore initialize(Map dsInfos) { } details.put(OntapStorageConstants.SIZE, capacityBytes.toString()); - details.putIfAbsent(OntapStorageConstants.IS_DISAGGREGATED, "false"); ProtocolType protocol = ProtocolType.valueOf(details.get(OntapStorageConstants.PROTOCOL)); -// long volumeSize = Long.parseLong(details.get(OntapStorageConstants.SIZE)); OntapStorage ontapStorage = new OntapStorage( details.get(OntapStorageConstants.USERNAME), details.get(OntapStorageConstants.PASSWORD), - 
details.get(OntapStorageConstants.MANAGEMENT_LIF), + details.get(OntapStorageConstants.STORAGE_IP), details.get(OntapStorageConstants.SVM_NAME), capacityBytes, - protocol, - Boolean.parseBoolean(details.get(OntapStorageConstants.IS_DISAGGREGATED).toLowerCase())); + protocol); StorageStrategy storageStrategy = StorageProviderFactory.getStrategy(ontapStorage); boolean isValid = storageStrategy.connect(); if (isValid) { + // Get the DataLIF for data access String dataLIF = storageStrategy.getNetworkInterface(); if (dataLIF == null || dataLIF.isEmpty()) { throw new CloudRuntimeException("Failed to retrieve Data LIF from ONTAP, cannot create primary storage"); @@ -157,6 +159,7 @@ public DataStore initialize(Map dsInfos) { throw new CloudRuntimeException("ONTAP details validation failed, cannot create primary storage"); } + // Determine storage pool type, path and port based on protocol String path; int port; switch (protocol) { @@ -164,7 +167,9 @@ public DataStore initialize(Map dsInfos) { parameters.setType(Storage.StoragePoolType.NetworkFilesystem); path = OntapStorageConstants.SLASH + storagePoolName; port = OntapStorageConstants.NFS3_PORT; - logger.info("Setting NFS path for storage pool: " + path + ", port: " + port); + // Force NFSv3 for ONTAP managed storage to avoid NFSv4 ID mapping issues + details.put(OntapStorageConstants.NFS_MOUNT_OPTIONS, OntapStorageConstants.NFS3_MOUNT_OPTIONS_VER_3); + logger.info("Setting NFS path for storage pool: " + path + ", port: " + port + " with mount option: vers=3"); break; case ISCSI: parameters.setType(Storage.StoragePoolType.Iscsi); @@ -196,9 +201,9 @@ public DataStore initialize(Map dsInfos) { } private long validateInitializeInputs(Long capacityBytes, Long podId, Long clusterId, Long zoneId, - String storagePoolName, String providerName, boolean managed, String url, Map details) { + String storagePoolName, String providerName, boolean managed, Map details) { - // Capacity validation + // Validate and set capacity if 
(capacityBytes == null || capacityBytes <= 0) { logger.warn("capacityBytes not provided or invalid (" + capacityBytes + "), using ONTAP minimum size: " + ONTAP_MIN_VOLUME_SIZE_IN_BYTES); capacityBytes = ONTAP_MIN_VOLUME_SIZE_IN_BYTES; @@ -207,11 +212,12 @@ private long validateInitializeInputs(Long capacityBytes, Long podId, Long clust capacityBytes = ONTAP_MIN_VOLUME_SIZE_IN_BYTES; } - // Scope (pod/cluster/zone) validation + // Validate scope if (podId == null ^ clusterId == null) { throw new CloudRuntimeException("Cluster Id or Pod Id is null, cannot create primary storage"); } - if (podId == null && clusterId == null) { + + if (podId == null) { if (zoneId != null) { logger.info("Both Pod Id and Cluster Id are null, Primary storage pool will be associated with a Zone"); } else { @@ -219,58 +225,54 @@ private long validateInitializeInputs(Long capacityBytes, Long podId, Long clust } } - // Basic parameter validation - if (StringUtils.isBlank(storagePoolName)) { + if (storagePoolName == null || storagePoolName.isEmpty()) { throw new CloudRuntimeException("Storage pool name is null or empty, cannot create primary storage"); } - if (StringUtils.isBlank(providerName)) { + + if (providerName == null || providerName.isEmpty()) { throw new CloudRuntimeException("Provider name is null or empty, cannot create primary storage"); } + + PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters(); + if (clusterId != null) { + ClusterVO clusterVO = _clusterDao.findById(clusterId); + Preconditions.checkNotNull(clusterVO, "Unable to locate the specified cluster"); + if (clusterVO.getHypervisorType() != Hypervisor.HypervisorType.KVM) { + throw new CloudRuntimeException("ONTAP primary storage is supported only for KVM hypervisor"); + } + parameters.setHypervisorType(clusterVO.getHypervisorType()); + } + logger.debug("ONTAP primary storage will be created as " + (managed ? 
"managed" : "unmanaged")); if (!managed) { throw new CloudRuntimeException("ONTAP primary storage must be managed"); } - // Details key validation + //Required ONTAP detail keys Set requiredKeys = Set.of( OntapStorageConstants.USERNAME, OntapStorageConstants.PASSWORD, OntapStorageConstants.SVM_NAME, OntapStorageConstants.PROTOCOL, - OntapStorageConstants.MANAGEMENT_LIF - ); - Set optionalKeys = Set.of( - OntapStorageConstants.IS_DISAGGREGATED + OntapStorageConstants.STORAGE_IP ); - Set allowedKeys = new java.util.HashSet<>(requiredKeys); - allowedKeys.addAll(optionalKeys); - - if (StringUtils.isNotBlank(url)) { - for (String segment : url.split(OntapStorageConstants.SEMICOLON)) { - if (segment.isEmpty()) { - continue; - } - String[] kv = segment.split(OntapStorageConstants.EQUALS, 2); - if (kv.length == 2) { - details.put(kv[0].trim(), kv[1].trim()); - } - } - } + // Validate existing entries (reject unexpected keys, empty values) for (Map.Entry e : details.entrySet()) { String key = e.getKey(); String val = e.getValue(); - if (!allowedKeys.contains(key)) { + if (!requiredKeys.contains(key)) { throw new CloudRuntimeException("Unexpected ONTAP detail key in URL: " + key); } - if (StringUtils.isBlank(val)) { + if (val == null || val.isEmpty()) { throw new CloudRuntimeException("ONTAP primary storage creation failed, empty detail: " + key); } } - Set providedKeys = new HashSet<>(details.keySet()); + // Detect missing required keys + Set providedKeys = new java.util.HashSet<>(details.keySet()); if (!providedKeys.containsAll(requiredKeys)) { - Set missing = new HashSet<>(requiredKeys); + Set missing = new java.util.HashSet<>(requiredKeys); missing.removeAll(providedKeys); throw new CloudRuntimeException("ONTAP primary storage creation failed, missing detail(s): " + missing); } @@ -282,16 +284,16 @@ private long validateInitializeInputs(Long capacityBytes, Long podId, Long clust public boolean attachCluster(DataStore dataStore, ClusterScope scope) { logger.debug("In 
attachCluster for ONTAP primary storage"); if (dataStore == null) { - throw new InvalidParameterValueException("attachCluster: dataStore should not be null"); + throw new InvalidParameterValueException(" dataStore should not be null"); } if (scope == null) { - throw new InvalidParameterValueException("attachCluster: scope should not be null"); + throw new InvalidParameterValueException(" scope should not be null"); } List hostsIdentifier = new ArrayList<>(); StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId()); if (storagePool == null) { logger.error("attachCluster : Storage Pool not found for id: " + dataStore.getId()); - throw new CloudRuntimeException("attachCluster : Storage Pool not found for id: " + dataStore.getId()); + throw new CloudRuntimeException(" Storage Pool not found for id: " + dataStore.getId()); } PrimaryDataStoreInfo primaryStore = (PrimaryDataStoreInfo)dataStore; List hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(primaryStore); @@ -306,21 +308,24 @@ public boolean attachCluster(DataStore dataStore, ClusterScope scope) { logger.error(errMsg); throw new CloudRuntimeException(errMsg); } - logger.debug("attachCluster: Attaching the pool to each of the host in the cluster: {}", primaryStore.getClusterId()); - if (hostsIdentifier != null && hostsIdentifier.size() > 0) { - try { - AccessGroup accessGroupRequest = new AccessGroup(); - accessGroupRequest.setHostsToConnect(hostsToConnect); - accessGroupRequest.setScope(scope); - primaryStore.setDetails(details); - accessGroupRequest.setPrimaryDataStoreInfo(primaryStore); - strategy.createAccessGroup(accessGroupRequest); - } catch (Exception e) { - logger.error("attachCluster: Failed to create access group on storage system for cluster: " + primaryStore.getClusterId() + ". 
Exception: " + e.getMessage()); - throw new CloudRuntimeException("attachCluster: Failed to create access group on storage system for cluster: " + primaryStore.getClusterId() + ". Exception: " + e.getMessage()); + // We need to create export policy at pool level and igroup at host level(in grantAccess) + if (ProtocolType.NFS3.name().equalsIgnoreCase(details.get(OntapStorageConstants.PROTOCOL))) { + // If there are no eligible host, export policy or igroup will not be created and will be taken as part of HostListener + if (!hostsIdentifier.isEmpty()) { + try { + AccessGroup accessGroupRequest = new AccessGroup(); + accessGroupRequest.setHostsToConnect(hostsToConnect); + accessGroupRequest.setScope(scope); + accessGroupRequest.setStoragePoolId(storagePool.getId()); + strategy.createAccessGroup(accessGroupRequest); + } catch (Exception e) { + logger.error("attachCluster: Failed to create access group on storage system for cluster: " + primaryStore.getClusterId() + ". Exception: " + e.getMessage()); + throw new CloudRuntimeException("Failed to create access group on storage system for cluster: " + primaryStore.getClusterId() + ". 
Exception: " + e.getMessage()); + } } } + logger.debug("attachCluster: Attaching the pool to each of the host in the cluster: {}", primaryStore.getClusterId()); for (HostVO host : hostsToConnect) { try { @@ -343,16 +348,16 @@ public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo exis public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.HypervisorType hypervisorType) { logger.debug("In attachZone for ONTAP primary storage"); if (dataStore == null) { - throw new InvalidParameterValueException("attachZone: dataStore should not be null"); + throw new InvalidParameterValueException("dataStore should not be null"); } if (scope == null) { - throw new InvalidParameterValueException("attachZone: scope should not be null"); + throw new InvalidParameterValueException("scope should not be null"); } List hostsIdentifier = new ArrayList<>(); StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId()); if (storagePool == null) { logger.error("attachZone : Storage Pool not found for id: " + dataStore.getId()); - throw new CloudRuntimeException("attachZone : Storage Pool not found for id: " + dataStore.getId()); + throw new CloudRuntimeException("Storage Pool not found for id: " + dataStore.getId()); } PrimaryDataStoreInfo primaryStore = (PrimaryDataStoreInfo)dataStore; @@ -369,17 +374,21 @@ public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.Hyper logger.error(errMsg); throw new CloudRuntimeException(errMsg); } - if (hostsIdentifier != null && !hostsIdentifier.isEmpty()) { - try { - AccessGroup accessGroupRequest = new AccessGroup(); - accessGroupRequest.setHostsToConnect(hostsToConnect); - accessGroupRequest.setScope(scope); - primaryStore.setDetails(details); - accessGroupRequest.setPrimaryDataStoreInfo(primaryStore); - strategy.createAccessGroup(accessGroupRequest); - } catch (Exception e) { - logger.error("attachZone: Failed to create access group on storage system for zone with Exception: " + 
e.getMessage()); - throw new CloudRuntimeException("attachZone: Failed to create access group on storage system for zone with Exception: " + e.getMessage()); + + // We need to create export policy at pool level and igroup at host level + if (ProtocolType.NFS3.name().equalsIgnoreCase(details.get(OntapStorageConstants.PROTOCOL))) { + // If there are no eligible host, export policy or igroup will not be created and will be taken as part of HostListener + if (!hostsIdentifier.isEmpty()) { + try { + AccessGroup accessGroupRequest = new AccessGroup(); + accessGroupRequest.setHostsToConnect(hostsToConnect); + accessGroupRequest.setScope(scope); + accessGroupRequest.setStoragePoolId(storagePool.getId()); + strategy.createAccessGroup(accessGroupRequest); + } catch (Exception e) { + logger.error("attachZone: Failed to create access group on storage system for zone with Exception: " + e.getMessage()); + throw new CloudRuntimeException(" Failed to create access group on storage system for zone with Exception: " + e.getMessage()); + } } } for (HostVO host : hostsToConnect) { @@ -401,7 +410,8 @@ private boolean validateProtocolSupportAndFetchHostsIdentifier(List host for (HostVO host : hosts) { if (host == null || host.getStorageUrl() == null || host.getStorageUrl().trim().isEmpty() || !host.getStorageUrl().startsWith(protocolPrefix)) { - return false; + // TODO we will inform customer through alert for excluded host because of protocol enabled on host + continue; } hostIdentifiers.add(host.getStorageUrl()); } @@ -411,18 +421,18 @@ private boolean validateProtocolSupportAndFetchHostsIdentifier(List host for (HostVO host : hosts) { if (host != null) { ip = host.getStorageIpAddress() != null ? 
host.getStorageIpAddress().trim() : ""; - if (ip.isEmpty()) { - if (host.getPrivateIpAddress() == null || host.getPrivateIpAddress().trim().isEmpty()) { - return false; - } - ip = host.getPrivateIpAddress().trim(); + if (ip.isEmpty() && StringUtils.isBlank(host.getPrivateIpAddress() )) { + // TODO we will inform customer through alert for excluded host because of protocol enabled on host + continue; + } else { + ip = ip.isEmpty() ? host.getPrivateIpAddress().trim() : ip; } } hostIdentifiers.add(ip); } break; default: - throw new CloudRuntimeException("validateProtocolSupportAndFetchHostsIdentifier : Unsupported protocol: " + protocolType.name()); + throw new CloudRuntimeException("Unsupported protocol: " + protocolType.name()); } logger.info("validateProtocolSupportAndFetchHostsIdentifier: All hosts support the protocol: " + protocolType.name()); return true; @@ -453,13 +463,15 @@ public boolean deleteDataStore(DataStore store) { logger.info("deleteDataStore: Starting deletion process for storage pool id: {}", store.getId()); long storagePoolId = store.getId(); + // Get the StoragePool details StoragePool storagePool = _storageMgr.getStoragePool(storagePoolId); if (storagePool == null) { logger.warn("deleteDataStore: Storage pool not found for id: {}, skipping deletion", storagePoolId); - return true; + return true; // Return true since the entity doesn't exist } try { + // Fetch storage pool details Map details = _datastoreDetailsDao.listDetailsKeyPairs(storagePoolId); if (details == null || details.isEmpty()) { logger.warn("deleteDataStore: No details found for storage pool id: {}, proceeding with CS entity deletion only", storagePoolId); @@ -468,11 +480,14 @@ public boolean deleteDataStore(DataStore store) { logger.info("deleteDataStore: Deleting access groups for storage pool '{}'", storagePool.getName()); + // Get the storage strategy to interact with ONTAP StorageStrategy storageStrategy = OntapStorageUtils.getStrategyByStoragePoolDetails(details); + // Cast 
DataStore to PrimaryDataStoreInfo to get full details PrimaryDataStoreInfo primaryDataStoreInfo = (PrimaryDataStoreInfo) store; primaryDataStoreInfo.setDetails(details); + // Call deleteStorageVolume to delete the underlying ONTAP volume logger.info("deleteDataStore: Deleting ONTAP volume for storage pool '{}'", storagePool.getName()); Volume volume = new Volume(); volume.setUuid(details.get(OntapStorageConstants.VOLUME_UUID)); @@ -490,16 +505,19 @@ public boolean deleteDataStore(DataStore store) { storagePoolId, e.getMessage(), e); } AccessGroup accessGroup = new AccessGroup(); - accessGroup.setPrimaryDataStoreInfo(primaryDataStoreInfo); + accessGroup.setStoragePoolId(storagePoolId); + // Delete access groups associated with this storage pool storageStrategy.deleteAccessGroup(accessGroup); logger.info("deleteDataStore: Successfully deleted access groups for storage pool '{}'", storagePool.getName()); } catch (Exception e) { logger.error("deleteDataStore: Failed to delete access groups for storage pool id: {}. 
Error: {}", storagePoolId, e.getMessage(), e); + // Continue with CloudStack entity deletion even if ONTAP cleanup fails logger.warn("deleteDataStore: Proceeding with CloudStack entity deletion despite ONTAP cleanup failure"); } + // Delete the CloudStack primary data store entity return _dataStoreHelper.deletePrimaryDataStore(store); } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java index a7c851dbe718..29aa89bff6c3 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java @@ -37,9 +37,12 @@ import com.cloud.utils.exception.CloudRuntimeException; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; import com.cloud.host.dao.HostDao; +import java.util.Map; + public class OntapHostListener implements HypervisorHostListener { protected Logger logger = LogManager.getLogger(getClass()); @@ -53,6 +56,9 @@ public class OntapHostListener implements HypervisorHostListener { private HostDao _hostDao; @Inject private StoragePoolHostDao storagePoolHostDao; + @Inject + private StoragePoolDetailsDao _storagePoolDetailsDao; + @Override public boolean hostConnect(long hostId, long poolId) { @@ -63,6 +69,7 @@ public boolean hostConnect(long hostId, long poolId) { return false; } + // TODO add host type check also since we support only KVM for now, host.getHypervisorType().equals(HypervisorType.KVM) StoragePool pool = _storagePoolDao.findById(poolId); if (pool == null) { logger.error("Failed to connect host 
- storage pool not found with id: {}", poolId); @@ -70,7 +77,12 @@ public boolean hostConnect(long hostId, long poolId) { } logger.info("Connecting host {} to ONTAP storage pool {}", host.getName(), pool.getName()); try { - ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, pool); + // Load storage pool details from database to pass mount options and other config to agent + Map detailsMap = _storagePoolDetailsDao.listDetailsKeyPairs(poolId); + // Create the ModifyStoragePoolCommand to send to the agent + // Note: Always send command even if database entry exists, because agent may have restarted + // and lost in-memory pool registration. The command handler is idempotent. + ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, pool, detailsMap); Answer answer = _agentMgr.easySend(hostId, cmd); @@ -87,9 +99,12 @@ public boolean hostConnect(long hostId, long poolId) { "Unable to establish a connection from agent to storage pool %s due to %s", pool, answer.getDetails())); } + // Get the mount path from the answer + if (!(answer instanceof ModifyStoragePoolAnswer)) { - logger.error("Received unexpected answer type {} for storage pool {}", answer.getClass().getName(), pool.getName()); - throw new CloudRuntimeException("Failed to connect to storage pool. 
Please check agent logs for details."); + throw new CloudRuntimeException(String.format( + "Unexpected answer type %s returned for modify storage pool command for pool %s on host %d", + answer.getClass().getName(), pool, hostId)); } ModifyStoragePoolAnswer mspAnswer = (ModifyStoragePoolAnswer) answer; @@ -101,6 +116,7 @@ public boolean hostConnect(long hostId, long poolId) { String localPath = poolInfo.getLocalPath(); logger.info("Storage pool {} successfully mounted at: {}", pool.getName(), localPath); + // Update or create the storage_pool_host_ref entry with the correct local_path StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(poolId, hostId); if (storagePoolHost == null) { @@ -113,6 +129,7 @@ public boolean hostConnect(long hostId, long poolId) { logger.info("Updated storage_pool_host_ref entry with local_path: {}", localPath); } + // Update pool capacity/usage information StoragePoolVO poolVO = _storagePoolDao.findById(poolId); if (poolVO != null && poolInfo.getCapacityBytes() > 0) { poolVO.setCapacityBytes(poolInfo.getCapacityBytes()); @@ -123,6 +140,8 @@ public boolean hostConnect(long hostId, long poolId) { } catch (Exception e) { logger.error("Exception while connecting host {} to storage pool {}", host.getName(), pool.getName(), e); + // CRITICAL: Don't throw exception - it crashes the agent and causes restart loops + // Return false to indicate failure without crashing return false; } return true; @@ -137,6 +156,7 @@ public boolean hostDisconnected(Host host, StoragePool pool) { logger.error("Failed to add host by HostListener as host was not found with id : {}", host.getId()); return false; } + // TODO add storage pool get validation logger.info("Disconnecting host {} from ONTAP storage pool {}", host.getName(), pool.getName()); try { diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/StorageProviderFactory.java 
b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/StorageProviderFactory.java index 5c0bf1af4454..cb9ac6f61bcc 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/StorageProviderFactory.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/StorageProviderFactory.java @@ -21,6 +21,9 @@ import com.cloud.utils.component.ComponentContext; import com.cloud.utils.exception.CloudRuntimeException; + +import java.nio.charset.StandardCharsets; + import org.apache.cloudstack.storage.feign.model.OntapStorage; import org.apache.cloudstack.storage.service.StorageStrategy; import org.apache.cloudstack.storage.service.UnifiedNASStrategy; @@ -36,23 +39,25 @@ public class StorageProviderFactory { public static StorageStrategy getStrategy(OntapStorage ontapStorage) { ProtocolType protocol = ontapStorage.getProtocol(); logger.info("Initializing StorageProviderFactory with protocol: " + protocol); + String decodedPassword = new String(java.util.Base64.getDecoder().decode(ontapStorage.getPassword()), StandardCharsets.UTF_8); + ontapStorage = new OntapStorage( + ontapStorage.getUsername(), + decodedPassword, + ontapStorage.getStorageIP(), + ontapStorage.getSvmName(), + ontapStorage.getSize(), + protocol); switch (protocol) { case NFS3: - if (!ontapStorage.getIsDisaggregated()) { - UnifiedNASStrategy unifiedNASStrategy = new UnifiedNASStrategy(ontapStorage); - ComponentContext.inject(unifiedNASStrategy); - unifiedNASStrategy.setOntapStorage(ontapStorage); - return unifiedNASStrategy; - } - throw new CloudRuntimeException("Unsupported configuration: Disaggregated ONTAP is not supported."); + UnifiedNASStrategy unifiedNASStrategy = new UnifiedNASStrategy(ontapStorage); + ComponentContext.inject(unifiedNASStrategy); + unifiedNASStrategy.setOntapStorage(ontapStorage); + return unifiedNASStrategy; case ISCSI: - if (!ontapStorage.getIsDisaggregated()) { - UnifiedSANStrategy 
unifiedSANStrategy = new UnifiedSANStrategy(ontapStorage); - ComponentContext.inject(unifiedSANStrategy); - unifiedSANStrategy.setOntapStorage(ontapStorage); - return unifiedSANStrategy; - } - throw new CloudRuntimeException("Unsupported configuration: Disaggregated ONTAP is not supported."); + UnifiedSANStrategy unifiedSANStrategy = new UnifiedSANStrategy(ontapStorage); + ComponentContext.inject(unifiedSANStrategy); + unifiedSANStrategy.setOntapStorage(ontapStorage); + return unifiedSANStrategy; default: throw new CloudRuntimeException("Unsupported protocol: " + protocol); } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/SANStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/SANStrategy.java index ce3b2806ef75..4b1bca00f95c 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/SANStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/SANStrategy.java @@ -19,11 +19,54 @@ package org.apache.cloudstack.storage.service; +import org.apache.cloudstack.storage.feign.model.Igroup; +import org.apache.cloudstack.storage.feign.model.Initiator; import org.apache.cloudstack.storage.feign.model.OntapStorage; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; public abstract class SANStrategy extends StorageStrategy { + private static final Logger s_logger = LogManager.getLogger(SANStrategy.class); public SANStrategy(OntapStorage ontapStorage) { super(ontapStorage); } + /** + * Ensures the LUN is mapped to the specified access group (igroup). + * If a mapping already exists, returns the existing LUN number. + * If not, creates a new mapping and returns the assigned LUN number. 
+ * + * @param svmName the SVM name + * @param lunName the LUN name + * @param accessGroupName the igroup name + * @return the logical unit number as a String + */ + public abstract String ensureLunMapped(String svmName, String lunName, String accessGroupName); + + /** + * Validates that the host initiator is present in the access group (igroup). + * + * @param hostInitiator the host initiator IQN + * @param svmName the SVM name + * @param igroup the igroup + * @return true if the initiator is found in the igroup, false otherwise + */ + public boolean validateInitiatorInAccessGroup(String hostInitiator, String svmName, Igroup igroup) { + s_logger.info("validateInitiatorInAccessGroup: Validating initiator [{}] is in igroup [{}] on SVM [{}]", hostInitiator, igroup, svmName); + + if (hostInitiator == null || hostInitiator.isEmpty()) { + s_logger.warn("validateInitiatorInAccessGroup: host initiator is null or empty"); + return false; + } + if (igroup.getInitiators() != null) { + for (Initiator initiator : igroup.getInitiators()) { + if (initiator.getName().equalsIgnoreCase(hostInitiator)) { + s_logger.info("validateInitiatorInAccessGroup: Initiator [{}] validated successfully in igroup [{}]", hostInitiator, igroup); + return true; + } + } + } + s_logger.warn("validateInitiatorInAccessGroup: Initiator [{}] NOT found in igroup [{}]", hostInitiator, igroup); + return false; + } } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java index 2eb459c78919..7d9dd33f7eff 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java @@ -17,7 +17,7 @@ * under the License. 
*/ - package org.apache.cloudstack.storage.service; +package org.apache.cloudstack.storage.service; import com.cloud.utils.exception.CloudRuntimeException; import feign.FeignException; @@ -25,7 +25,9 @@ import org.apache.cloudstack.storage.feign.client.AggregateFeignClient; import org.apache.cloudstack.storage.feign.client.JobFeignClient; import org.apache.cloudstack.storage.feign.client.NetworkFeignClient; +import org.apache.cloudstack.storage.feign.client.NASFeignClient; import org.apache.cloudstack.storage.feign.client.SANFeignClient; +import org.apache.cloudstack.storage.feign.client.SnapshotFeignClient; import org.apache.cloudstack.storage.feign.client.SvmFeignClient; import org.apache.cloudstack.storage.feign.client.VolumeFeignClient; import org.apache.cloudstack.storage.feign.model.Aggregate; @@ -51,25 +53,39 @@ import java.util.Map; import java.util.Objects; +/** + * Storage Strategy represents the communication path for all the ONTAP storage options + * + * ONTAP storage operation would vary based on + * Supported protocols: NFS3.0, NFS4.1, FC, iSCSI, Nvme/TCP and Nvme/FC + * Supported platform: Unified and Disaggregated + */ public abstract class StorageStrategy { - private final FeignClientFactory feignClientFactory; - private final AggregateFeignClient aggregateFeignClient; - private final VolumeFeignClient volumeFeignClient; - private final SvmFeignClient svmFeignClient; - private final JobFeignClient jobFeignClient; - private final NetworkFeignClient networkFeignClient; - private final SANFeignClient sanFeignClient; + // Replace @Inject Feign clients with FeignClientFactory + protected FeignClientFactory feignClientFactory; + protected AggregateFeignClient aggregateFeignClient; + protected VolumeFeignClient volumeFeignClient; + protected SvmFeignClient svmFeignClient; + protected JobFeignClient jobFeignClient; + protected NetworkFeignClient networkFeignClient; + protected SANFeignClient sanFeignClient; + protected NASFeignClient nasFeignClient; + 
protected SnapshotFeignClient snapshotFeignClient; protected OntapStorage storage; + /** + * Presents aggregate object for the unified storage, not eligible for disaggregated + */ private List aggregates; private static final Logger logger = LogManager.getLogger(StorageStrategy.class); public StorageStrategy(OntapStorage ontapStorage) { storage = ontapStorage; - String baseURL = OntapStorageConstants.HTTPS + storage.getManagementLIF(); + String baseURL = OntapStorageConstants.HTTPS + storage.getStorageIP(); logger.info("Initializing StorageStrategy with base URL: " + baseURL); + // Initialize FeignClientFactory and create clients this.feignClientFactory = new FeignClientFactory(); this.aggregateFeignClient = feignClientFactory.createClient(AggregateFeignClient.class, baseURL); this.volumeFeignClient = feignClientFactory.createClient(VolumeFeignClient.class, baseURL); @@ -77,14 +93,18 @@ public StorageStrategy(OntapStorage ontapStorage) { this.jobFeignClient = feignClientFactory.createClient(JobFeignClient.class, baseURL); this.networkFeignClient = feignClientFactory.createClient(NetworkFeignClient.class, baseURL); this.sanFeignClient = feignClientFactory.createClient(SANFeignClient.class, baseURL); + this.nasFeignClient = feignClientFactory.createClient(NASFeignClient.class, baseURL); + this.snapshotFeignClient = feignClientFactory.createClient(SnapshotFeignClient.class, baseURL); } + // Connect method to validate ONTAP cluster, credentials, protocol, and SVM public boolean connect() { - logger.info("Attempting to connect to ONTAP cluster at " + storage.getManagementLIF() + " and validate SVM " + + logger.info("Attempting to connect to ONTAP cluster at " + storage.getStorageIP() + " and validate SVM " + storage.getSvmName() + ", protocol " + storage.getProtocol()); String authHeader = OntapStorageUtils.generateAuthHeader(storage.getUsername(), storage.getPassword()); String svmName = storage.getSvmName(); try { + // Call the SVM API to check if the SVM exists Svm 
svm = new Svm(); logger.info("Fetching the SVM details..."); Map queryParams = Map.of(OntapStorageConstants.NAME, svmName, OntapStorageConstants.FIELDS, OntapStorageConstants.AGGREGATES + @@ -146,6 +166,17 @@ public boolean connect() { return true; } + // Common methods like create/delete etc., should be here + + /** + * Creates ONTAP Flex-Volume + * Eligible only for Unified ONTAP storage + * throw exception in case of disaggregated ONTAP storage + * + * @param volumeName the name of the volume to create + * @param size the size of the volume in bytes + * @return the created Volume object + */ public Volume createStorageVolume(String volumeName, Long size) { logger.info("Creating volume: " + volumeName + " of size: " + size + " bytes"); @@ -160,6 +191,7 @@ public Volume createStorageVolume(String volumeName, Long size) { String authHeader = OntapStorageUtils.generateAuthHeader(storage.getUsername(), storage.getPassword()); + // Generate the Create Volume Request Volume volumeRequest = new Volume(); Svm svm = new Svm(); svm.setName(svmName); @@ -169,6 +201,7 @@ public Volume createStorageVolume(String volumeName, Long size) { volumeRequest.setName(volumeName); volumeRequest.setSvm(svm); + // Pick the best aggregate for this specific request (largest available, online, and sufficient space). 
long maxAvailableAggregateSpaceBytes = -1L; Aggregate aggrChosen = null; for (Aggregate aggr : aggregates) { @@ -224,7 +257,7 @@ public Volume createStorageVolume(String volumeName, Long size) { } String jobUUID = jobResponse.getJob().getUuid(); - Boolean jobSucceeded = jobPollForSuccess(jobUUID); + Boolean jobSucceeded = jobPollForSuccess(jobUUID,10, 1); if (!jobSucceeded) { logger.error("Volume creation job failed for volume: " + volumeName); throw new CloudRuntimeException("Volume creation job failed for volume: " + volumeName); @@ -234,6 +267,8 @@ public Volume createStorageVolume(String volumeName, Long size) { logger.error("Exception while creating volume: ", e); throw new CloudRuntimeException("Failed to create volume: " + e.getMessage()); } + // Verify if the Volume has been created and set the Volume object + // Call the VolumeFeignClient to get the created volume details OntapResponse volumesResponse = volumeFeignClient.getAllVolumes(authHeader, Map.of(OntapStorageConstants.NAME, volumeName)); if (volumesResponse == null || volumesResponse.getRecords() == null || volumesResponse.getRecords().isEmpty()) { logger.error("Volume " + volumeName + " not found after creation."); @@ -281,16 +316,32 @@ public Volume createStorageVolume(String volumeName, Long size) { } } + /** + * Updates ONTAP Flex-Volume + * Eligible only for Unified ONTAP storage + * throw exception in case of disaggregated ONTAP storage + * + * @param volume the volume to update + * @return the updated Volume object + */ public Volume updateStorageVolume(Volume volume) { return null; } + /** + * Delete ONTAP Flex-Volume + * Eligible only for Unified ONTAP storage + * throw exception in case of disaggregated ONTAP storage + * + * @param volume the volume to delete + */ public void deleteStorageVolume(Volume volume) { logger.info("Deleting ONTAP volume by name: " + volume.getName() + " and uuid: " + volume.getUuid()); String authHeader = 
OntapStorageUtils.generateAuthHeader(storage.getUsername(), storage.getPassword()); try { + // TODO: Implement lun and file deletion, if any, before deleting the volume JobResponse jobResponse = volumeFeignClient.deleteVolume(authHeader, volume.getUuid()); - Boolean jobSucceeded = jobPollForSuccess(jobResponse.getJob().getUuid()); + Boolean jobSucceeded = jobPollForSuccess(jobResponse.getJob().getUuid(),10, 1); if (!jobSucceeded) { logger.error("Volume deletion job failed for volume: " + volume.getName()); throw new CloudRuntimeException("Volume deletion job failed for volume: " + volume.getName()); @@ -303,10 +354,25 @@ public void deleteStorageVolume(Volume volume) { logger.info("ONTAP volume deletion process completed for volume: " + volume.getName()); } + /** + * Gets ONTAP Flex-Volume + * Eligible only for Unified ONTAP storage + * throw exception in case of disaggregated ONTAP storage + * + * @param volume the volume to retrieve + * @return the retrieved Volume object + */ public Volume getStorageVolume(Volume volume) { return null; } + /** + * Get the storage path based on protocol. 
+ * For iSCSI: Returns the iSCSI target IQN (e.g., iqn.1992-08.com.netapp:sn.xxx:vs.3) + * For NFS: Returns the mount path (to be implemented) + * + * @return the storage path as a String + */ public String getStoragePath() { String authHeader = OntapStorageUtils.generateAuthHeader(storage.getUsername(), storage.getPassword()); String targetIqn = null; @@ -336,6 +402,7 @@ public String getStoragePath() { return targetIqn; } else if (storage.getProtocol() == ProtocolType.NFS3) { + // TODO: Implement NFS path retrieval logic } else { throw new CloudRuntimeException("Unsupported protocol for path retrieval: " + storage.getProtocol()); } @@ -347,6 +414,14 @@ public String getStoragePath() { return targetIqn; } + + + /** + * Get the network ip interface + * + * @return the network interface ip as a String + */ + public String getNetworkInterface() { String authHeader = OntapStorageUtils.generateAuthHeader(storage.getUsername(), storage.getPassword()); try { @@ -371,6 +446,7 @@ public String getNetworkInterface() { networkFeignClient.getNetworkIpInterfaces(authHeader, queryParams); if (response != null && response.getRecords() != null && !response.getRecords().isEmpty()) { IpInterface ipInterface = null; + // For simplicity, return the first interface's name (Of IPv4 type for NFS3) if (storage.getProtocol() == ProtocolType.ISCSI) { ipInterface = response.getRecords().get(0); } else if (storage.getProtocol() == ProtocolType.NFS3) { @@ -394,37 +470,189 @@ public String getNetworkInterface() { } } + /** + * Method encapsulates the behavior based on the opted protocol in subclasses. 
+ * it is going to mimic + * createLun for iSCSI, FC protocols + * createFile for NFS3.0 and NFS4.1 protocols + * createNameSpace for Nvme/TCP and Nvme/FC protocol + * + * @param cloudstackVolume the CloudStack volume to create + * @return the created CloudStackVolume object + */ abstract public CloudStackVolume createCloudStackVolume(CloudStackVolume cloudstackVolume); + /** + * Method encapsulates the behavior based on the opted protocol in subclasses. + * it is going to mimic + * updateLun for iSCSI, FC protocols + * updateFile for NFS3.0 and NFS4.1 protocols + * updateNameSpace for Nvme/TCP and Nvme/FC protocol + * + * @param cloudstackVolume the CloudStack volume to update + * @return the updated CloudStackVolume object + */ abstract CloudStackVolume updateCloudStackVolume(CloudStackVolume cloudstackVolume); + /** + * Method encapsulates the behavior based on the opted protocol in subclasses. + * it is going to mimic + * deleteLun for iSCSI, FC protocols + * deleteFile for NFS3.0 and NFS4.1 protocols + * deleteNameSpace for Nvme/TCP and Nvme/FC protocol + * + * @param cloudstackVolume the CloudStack volume to delete + */ abstract public void deleteCloudStackVolume(CloudStackVolume cloudstackVolume); + /** + * Method encapsulates the behavior based on the opted protocol in subclasses. + * it is going to mimic + * cloneLun for iSCSI, FC protocols + * cloneFile for NFS3.0 and NFS4.1 protocols + * cloneNameSpace for Nvme/TCP and Nvme/FC protocol + * @param cloudstackVolume the CloudStack volume to copy + */ abstract public void copyCloudStackVolume(CloudStackVolume cloudstackVolume); + /** + * Method encapsulates the behavior based on the opted protocol in subclasses. 
+ * it is going to mimic + * getLun for iSCSI, FC protocols + * getFile for NFS3.0 and NFS4.1 protocols + * getNameSpace for Nvme/TCP and Nvme/FC protocol + * @param cloudStackVolumeMap the CloudStack volume to retrieve + * @return the retrieved CloudStackVolume object + */ abstract public CloudStackVolume getCloudStackVolume(Map cloudStackVolumeMap); + /** + * Reverts a CloudStack volume to a snapshot using protocol-specific ONTAP APIs. + * + *

<p>This method encapsulates the snapshot revert behavior based on protocol:</p> + * <ul> + *   <li>iSCSI/FC: Uses {@code POST /api/storage/luns/{lun.uuid}/restore} + *       to restore LUN data from the FlexVolume snapshot.</li> + *   <li>NFS: Uses {@code POST /api/storage/volumes/{vol.uuid}/snapshots/{snap.uuid}/files/{path}/restore} + *       to restore a single file from the FlexVolume snapshot.</li> + * </ul>
+ * + * @param snapshotName The ONTAP FlexVolume snapshot name + * @param flexVolUuid The FlexVolume UUID containing the snapshot + * @param snapshotUuid The ONTAP snapshot UUID (used for NFS file restore) + * @param volumePath The path of the file/LUN within the FlexVolume + * @param lunUuid The LUN UUID (only for iSCSI, null for NFS) + * @param flexVolName The FlexVolume name (only for iSCSI, for constructing destination path) + * @return JobResponse for the async restore operation + */ + public abstract JobResponse revertSnapshotForCloudStackVolume(String snapshotName, String flexVolUuid, + String snapshotUuid, String volumePath, + String lunUuid, String flexVolName); + + + /** + * Method encapsulates the behavior based on the opted protocol in subclasses + * createiGroup for iSCSI and FC protocols + * createExportPolicy for NFS 3.0 and NFS 4.1 protocols + * createSubsystem for Nvme/TCP and Nvme/FC protocols + * @param accessGroup the access group to create + * @return the created AccessGroup object + */ abstract public AccessGroup createAccessGroup(AccessGroup accessGroup); + /** + * Method encapsulates the behavior based on the opted protocol in subclasses + * deleteiGroup for iSCSI and FC protocols + * deleteExportPolicy for NFS 3.0 and NFS 4.1 protocols + * deleteSubsystem for Nvme/TCP and Nvme/FC protocols + * @param accessGroup the access group to delete + */ abstract public void deleteAccessGroup(AccessGroup accessGroup); + /** + * Method encapsulates the behavior based on the opted protocol in subclasses + * updateiGroup example add/remove-Iqn for iSCSI and FC protocols + * updateExportPolicy example add/remove-Rule for NFS 3.0 and NFS 4.1 protocols + * //TODO for Nvme/TCP and Nvme/FC protocols + * @param accessGroup the access group to update + * @return the updated AccessGroup object + */ abstract AccessGroup updateAccessGroup(AccessGroup accessGroup); + /** + * Method encapsulates the behavior based on the opted protocol in subclasses + * e.g., 
getIGroup for iSCSI and FC protocols + * e.g., getExportPolicy for NFS 3.0 and NFS 4.1 protocols + * //TODO for Nvme/TCP and Nvme/FC protocols + * @param values map to get access group values like name, svm name etc. + */ abstract public AccessGroup getAccessGroup(Map values); + /** + * Method encapsulates the behavior based on the opted protocol in subclasses + * lunMap for iSCSI and FC protocols + * //TODO for NFS 3.0 and NFS 4.1 protocols (e.g., export rule management) + * //TODO for Nvme/TCP and Nvme/FC protocols + * @param values map including SVM name, LUN name, and igroup name (for SAN) or equivalent for NAS + * @return map containing logical unit number for the new/existing mapping (SAN) or relevant info for NAS + */ abstract public Map enableLogicalAccess(Map values); + /** + * Method encapsulates the behavior based on the opted protocol in subclasses + * lunUnmap for iSCSI and FC protocols + * @param values map including LUN UUID and iGroup UUID (for SAN) or equivalent for NAS + */ abstract public void disableLogicalAccess(Map values); + /** + * Method encapsulates the behavior based on the opted protocol in subclasses + * lunMap lookup for iSCSI/FC protocols (GET-only, no side-effects) + * @param values map with SVM name, LUN name, and igroup name (for SAN) or equivalent for NAS + * @return map containing logical unit number if mapping exists; otherwise null + */ abstract public Map getLogicalAccess(Map values); - private Boolean jobPollForSuccess(String jobUUID) { + // ── FlexVolume Snapshot accessors ──────────────────────────────────────── + + /** + * Returns the {@link SnapshotFeignClient} for ONTAP FlexVolume snapshot operations. + */ + public SnapshotFeignClient getSnapshotFeignClient() { + return snapshotFeignClient; + } + + /** + * Returns the {@link NASFeignClient} for ONTAP NAS file operations + * (including file clone for single-file SnapRestore). 
+ */ + public NASFeignClient getNasFeignClient() { + return nasFeignClient; + } + + /** + * Generates the Basic-auth header for ONTAP REST calls. + */ + public String getAuthHeader() { + return OntapStorageUtils.generateAuthHeader(storage.getUsername(), storage.getPassword()); + } + + /** + * Polls an ONTAP async job for successful completion. + * + * @param jobUUID UUID of the ONTAP job to poll + * @param maxRetries maximum number of poll attempts + * @param sleepTimeInSecs seconds to sleep between poll attempts + * @return true if the job completed successfully + */ + public Boolean jobPollForSuccess(String jobUUID, int maxRetries, int sleepTimeInSecs) { + //Create URI for GET Job API int jobRetryCount = 0; Job jobResp = null; try { String authHeader = OntapStorageUtils.generateAuthHeader(storage.getUsername(), storage.getPassword()); while (jobResp == null || !jobResp.getState().equals(OntapStorageConstants.JOB_SUCCESS)) { - if (jobRetryCount >= OntapStorageConstants.JOB_MAX_RETRIES) { + if (jobRetryCount >= maxRetries) { logger.error("Job did not complete within expected time."); throw new CloudRuntimeException("Job did not complete within expected time."); } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java index 54dee01ac2b6..1b9af868f7dd 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java @@ -19,19 +19,22 @@ package org.apache.cloudstack.storage.service; +import com.cloud.agent.api.Answer; import com.cloud.host.HostVO; +import com.cloud.storage.Storage; +import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.VolumeDao; import com.cloud.utils.exception.CloudRuntimeException; import feign.FeignException; 
+import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; +import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector; -import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; +import org.apache.cloudstack.storage.command.CreateObjectCommand; +import org.apache.cloudstack.storage.command.DeleteCommand; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; -import org.apache.cloudstack.storage.feign.FeignClientFactory; -import org.apache.cloudstack.storage.feign.client.JobFeignClient; -import org.apache.cloudstack.storage.feign.client.NASFeignClient; -import org.apache.cloudstack.storage.feign.client.VolumeFeignClient; import org.apache.cloudstack.storage.feign.model.ExportPolicy; import org.apache.cloudstack.storage.feign.model.ExportRule; +import org.apache.cloudstack.storage.feign.model.FileInfo; import org.apache.cloudstack.storage.feign.model.Job; import org.apache.cloudstack.storage.feign.model.Nas; import org.apache.cloudstack.storage.feign.model.OntapStorage; @@ -39,8 +42,10 @@ import org.apache.cloudstack.storage.feign.model.Volume; import org.apache.cloudstack.storage.feign.model.response.JobResponse; import org.apache.cloudstack.storage.feign.model.response.OntapResponse; +import org.apache.cloudstack.storage.feign.model.CliSnapshotRestoreRequest; import org.apache.cloudstack.storage.service.model.AccessGroup; import org.apache.cloudstack.storage.service.model.CloudStackVolume; +import org.apache.cloudstack.storage.volume.VolumeObject; import org.apache.cloudstack.storage.utils.OntapStorageConstants; import org.apache.cloudstack.storage.utils.OntapStorageUtils; import org.apache.logging.log4j.LogManager; @@ -52,23 +57,13 @@ import java.util.Map; public class UnifiedNASStrategy extends NASStrategy { - private static final Logger logger = LogManager.getLogger(UnifiedNASStrategy.class); - private final FeignClientFactory 
feignClientFactory; - private final NASFeignClient nasFeignClient; - private final VolumeFeignClient volumeFeignClient; - private final JobFeignClient jobFeignClient; @Inject private VolumeDao volumeDao; @Inject private EndPointSelector epSelector; @Inject private StoragePoolDetailsDao storagePoolDetailsDao; public UnifiedNASStrategy(OntapStorage ontapStorage) { super(ontapStorage); - String baseURL = OntapStorageConstants.HTTPS + ontapStorage.getManagementLIF(); - this.feignClientFactory = new FeignClientFactory(); - this.nasFeignClient = feignClientFactory.createClient(NASFeignClient.class, baseURL); - this.volumeFeignClient = feignClientFactory.createClient(VolumeFeignClient.class,baseURL ); - this.jobFeignClient = feignClientFactory.createClient(JobFeignClient.class, baseURL ); } public void setOntapStorage(OntapStorage ontapStorage) { @@ -77,7 +72,22 @@ public void setOntapStorage(OntapStorage ontapStorage) { @Override public CloudStackVolume createCloudStackVolume(CloudStackVolume cloudstackVolume) { - return null; + logger.info("createCloudStackVolume: Create cloudstack volume " + cloudstackVolume); + try { + // Step 1: set cloudstack volume metadata + String volumeUuid = updateCloudStackVolumeMetadata(cloudstackVolume.getDatastoreId(), cloudstackVolume.getVolumeInfo()); + // Step 2: Send command to KVM host to create qcow2 file using qemu-img + Answer answer = createVolumeOnKVMHost(cloudstackVolume.getVolumeInfo()); + if (answer == null || !answer.getResult()) { + String errMsg = answer != null ? 
answer.getDetails() : "Failed to create qcow2 on KVM host"; + logger.error("createCloudStackVolume: " + errMsg); + throw new CloudRuntimeException(errMsg); + } + return cloudstackVolume; + }catch (Exception e) { + logger.error("createCloudStackVolume: error occured " + e); + throw new CloudRuntimeException(e); + } } @Override @@ -87,6 +97,19 @@ CloudStackVolume updateCloudStackVolume(CloudStackVolume cloudstackVolume) { @Override public void deleteCloudStackVolume(CloudStackVolume cloudstackVolume) { + logger.info("deleteCloudStackVolume: Delete cloudstack volume " + cloudstackVolume); + try { + // Step 1: Send command to KVM host to delete qcow2 file using qemu-img + Answer answer = deleteVolumeOnKVMHost(cloudstackVolume.getVolumeInfo()); + if (answer == null || !answer.getResult()) { + String errMsg = answer != null ? answer.getDetails() : "Failed to delete qcow2 on KVM host"; + logger.error("deleteCloudStackVolume: " + errMsg); + throw new CloudRuntimeException(errMsg); + } + }catch (Exception e) { + logger.error("deleteCloudStackVolume: error occured " + e); + throw new CloudRuntimeException(e); + } } @Override @@ -96,24 +119,40 @@ public void copyCloudStackVolume(CloudStackVolume cloudstackVolume) { @Override public CloudStackVolume getCloudStackVolume(Map cloudStackVolumeMap) { - return null; + logger.info("getCloudStackVolume: Get cloudstack volume " + cloudStackVolumeMap); + CloudStackVolume cloudStackVolume = null; + FileInfo fileInfo = getFile(cloudStackVolumeMap.get(OntapStorageConstants.VOLUME_UUID),cloudStackVolumeMap.get(OntapStorageConstants.FILE_PATH)); + + if(fileInfo != null){ + cloudStackVolume = new CloudStackVolume(); + cloudStackVolume.setFlexVolumeUuid(cloudStackVolumeMap.get(OntapStorageConstants.VOLUME_UUID)); + cloudStackVolume.setFile(fileInfo); + } else { + logger.warn("getCloudStackVolume: File not found for volume UUID: {} and file path: {}", cloudStackVolumeMap.get(OntapStorageConstants.VOLUME_UUID), 
cloudStackVolumeMap.get(OntapStorageConstants.FILE_PATH)); + } + + return cloudStackVolume; } @Override public AccessGroup createAccessGroup(AccessGroup accessGroup) { logger.info("createAccessGroup: Create access group {}: " , accessGroup); - Map details = accessGroup.getPrimaryDataStoreInfo().getDetails(); + + Map details = storagePoolDetailsDao.listDetailsKeyPairs(accessGroup.getStoragePoolId()); String svmName = details.get(OntapStorageConstants.SVM_NAME); String volumeUUID = details.get(OntapStorageConstants.VOLUME_UUID); String volumeName = details.get(OntapStorageConstants.VOLUME_NAME); + // Create the export policy ExportPolicy policyRequest = createExportPolicyRequest(accessGroup,svmName,volumeName); try { ExportPolicy createdPolicy = createExportPolicy(svmName, policyRequest); - logger.info("ExportPolicy created: {}, now attaching this policy to storage pool volume", createdPolicy.getName()); + logger.info("createAccessGroup: ExportPolicy created: {}, now attaching this policy to storage pool volume", createdPolicy.getName()); + // attach export policy to volume of storage pool assignExportPolicyToVolume(volumeUUID,createdPolicy.getName()); - storagePoolDetailsDao.addDetail(accessGroup.getPrimaryDataStoreInfo().getId(), OntapStorageConstants.EXPORT_POLICY_ID, String.valueOf(createdPolicy.getId()), true); - storagePoolDetailsDao.addDetail(accessGroup.getPrimaryDataStoreInfo().getId(), OntapStorageConstants.EXPORT_POLICY_NAME, createdPolicy.getName(), true); + // save the export policy details in storage pool details + storagePoolDetailsDao.addDetail(accessGroup.getStoragePoolId(), OntapStorageConstants.EXPORT_POLICY_ID, String.valueOf(createdPolicy.getId()), true); + storagePoolDetailsDao.addDetail(accessGroup.getStoragePoolId(), OntapStorageConstants.EXPORT_POLICY_NAME, createdPolicy.getName(), true); logger.info("Successfully assigned exportPolicy {} to volume {}", policyRequest.getName(), volumeName); accessGroup.setPolicy(policyRequest); return 
accessGroup; @@ -128,23 +167,15 @@ public void deleteAccessGroup(AccessGroup accessGroup) { logger.info("deleteAccessGroup: Deleting export policy"); if (accessGroup == null) { - throw new CloudRuntimeException("deleteAccessGroup: Invalid accessGroup object - accessGroup is null"); + throw new CloudRuntimeException("Invalid accessGroup object - accessGroup is null"); } - PrimaryDataStoreInfo primaryDataStoreInfo = accessGroup.getPrimaryDataStoreInfo(); - if (primaryDataStoreInfo == null) { - throw new CloudRuntimeException("deleteAccessGroup: PrimaryDataStoreInfo is null in accessGroup"); - } - logger.info("deleteAccessGroup: Deleting export policy for the storage pool {}", primaryDataStoreInfo.getName()); try { + Map details = storagePoolDetailsDao.listDetailsKeyPairs(accessGroup.getStoragePoolId()); String authHeader = OntapStorageUtils.generateAuthHeader(storage.getUsername(), storage.getPassword()); - String svmName = storage.getSvmName(); - String exportPolicyName = primaryDataStoreInfo.getDetails().get(OntapStorageConstants.EXPORT_POLICY_NAME); - String exportPolicyId = primaryDataStoreInfo.getDetails().get(OntapStorageConstants.EXPORT_POLICY_ID); - if (exportPolicyId == null || exportPolicyId.isEmpty()) { - logger.warn("deleteAccessGroup: Export policy ID not found in storage pool details for storage pool {}. Cannot delete export policy.", primaryDataStoreInfo.getName()); - throw new CloudRuntimeException("Export policy ID not found for storage pool: " + primaryDataStoreInfo.getName()); - } + // Determine export policy attached to the storage pool + String exportPolicyName = details.get(OntapStorageConstants.EXPORT_POLICY_NAME); + String exportPolicyId = details.get(OntapStorageConstants.EXPORT_POLICY_ID); try { nasFeignClient.deleteExportPolicyById(authHeader,exportPolicyId); @@ -152,6 +183,7 @@ public void deleteAccessGroup(AccessGroup accessGroup) { } catch (Exception e) { logger.error("deleteAccessGroup: Failed to delete export policy. 
Exception: {}", e.getMessage(), e); throw new CloudRuntimeException("Failed to delete export policy: " + e.getMessage(), e); + } } catch (Exception e) { logger.error("deleteAccessGroup: Failed to delete export policy. Exception: {}", e.getMessage(), e); @@ -180,11 +212,11 @@ public void disableLogicalAccess(Map values) { @Override public Map getLogicalAccess(Map values) { - return null; + return Map.of(); } private ExportPolicy createExportPolicy(String svmName, ExportPolicy policy) { - logger.info("Creating export policy: {} for SVM: {}", policy, svmName); + logger.info("createExportPolicy: Creating export policy: {} for SVM: {}", policy, svmName); try { String authHeader = OntapStorageUtils.generateAuthHeader(storage.getUsername(), storage.getPassword()); @@ -197,18 +229,18 @@ private ExportPolicy createExportPolicy(String svmName, ExportPolicy policy) { throw new CloudRuntimeException("Export policy " + policy.getName() + " was not created on ONTAP. " + "Received successful response but policy does not exist."); } - logger.info("Export policy created and verified successfully: " + policy.getName()); + logger.info("createExportPolicy: Export policy created and verified successfully: " + policy.getName()); } catch (FeignException e) { - logger.error("Failed to verify export policy creation: " + policy.getName(), e); + logger.error("createExportPolicy: Failed to verify export policy creation: " + policy.getName(), e); throw new CloudRuntimeException("Export policy creation verification failed: " + e.getMessage()); } - logger.info("Export policy created successfully with name {}", policy.getName()); + logger.info("createExportPolicy: Export policy created successfully with name {}", policy.getName()); return policiesResponse.getRecords().get(0); } catch (FeignException e) { - logger.error("Failed to create export policy: {}", policy, e); + logger.error("createExportPolicy: Failed to create export policy: {}", policy, e); throw new CloudRuntimeException("Failed to 
create export policy: " + e.getMessage()); } catch (Exception e) { - logger.error("Exception while creating export policy: {}", policy, e); + logger.error("createExportPolicy: Exception while creating export policy: {}", policy, e); throw new CloudRuntimeException("Failed to create export policy: " + e.getMessage()); } } @@ -231,6 +263,7 @@ private void assignExportPolicyToVolume(String volumeUuid, String policyName) { throw new CloudRuntimeException("Failed to attach policy " + policyName + "to volume " + volumeUuid); } String jobUUID = jobResponse.getJob().getUuid(); + //Create URI for GET Job API int jobRetryCount = 0; Job createVolumeJob = null; while(createVolumeJob == null || !createVolumeJob.getState().equals(OntapStorageConstants.JOB_SUCCESS)) { @@ -252,19 +285,88 @@ private void assignExportPolicyToVolume(String volumeUuid, String policyName) { Thread.sleep(OntapStorageConstants.CREATE_VOLUME_CHECK_SLEEP_TIME); } } catch (Exception e) { - logger.error("Exception while updating volume: ", e); + logger.error("assignExportPolicyToVolume: Exception while updating volume: ", e); throw new CloudRuntimeException("Failed to update volume: " + e.getMessage()); } - logger.info("Export policy successfully assigned to volume: {}", volumeUuid); + logger.info("assignExportPolicyToVolume: Export policy successfully assigned to volume: {}", volumeUuid); } catch (FeignException e) { - logger.error("Failed to assign export policy to volume: {}", volumeUuid, e); + logger.error("assignExportPolicyToVolume: Failed to assign export policy to volume: {}", volumeUuid, e); throw new CloudRuntimeException("Failed to assign export policy: " + e.getMessage()); } catch (Exception e) { - logger.error("Exception while assigning export policy to volume: {}", volumeUuid, e); + logger.error("assignExportPolicyToVolume: Exception while assigning export policy to volume: {}", volumeUuid, e); throw new CloudRuntimeException("Failed to assign export policy: " + e.getMessage()); } } + private 
boolean createFile(String volumeUuid, String filePath, FileInfo fileInfo) { + logger.info("createFile: Creating file: {} in volume: {}", filePath, volumeUuid); + try { + String authHeader = OntapStorageUtils.generateAuthHeader(storage.getUsername(), storage.getPassword()); + nasFeignClient.createFile(authHeader, volumeUuid, filePath, fileInfo); + logger.info("createFile: File created successfully: {} in volume: {}", filePath, volumeUuid); + return true; + } catch (FeignException e) { + logger.error("createFile: Failed to create file: {} in volume: {}", filePath, volumeUuid, e); + return false; + } catch (Exception e) { + logger.error("createFile: Exception while creating file: {} in volume: {}", filePath, volumeUuid, e); + return false; + } + } + + private boolean deleteFile(String volumeUuid, String filePath) { + logger.info("deleteFile: Deleting file: {} from volume: {}", filePath, volumeUuid); + try { + String authHeader = OntapStorageUtils.generateAuthHeader(storage.getUsername(), storage.getPassword()); + nasFeignClient.deleteFile(authHeader, volumeUuid, filePath); + logger.info("deleteFile: File deleted successfully: {} from volume: {}", filePath, volumeUuid); + return true; + } catch (FeignException e) { + logger.error("deleteFile: Failed to delete file: {} from volume: {}", filePath, volumeUuid, e); + return false; + } catch (Exception e) { + logger.error("deleteFile: Exception while deleting file: {} from volume: {}", filePath, volumeUuid, e); + return false; + } + } + + private OntapResponse getFileInfo(String volumeUuid, String filePath) { + logger.debug("getFileInfo: Getting file info for: {} in volume: {}", filePath, volumeUuid); + try { + String authHeader = OntapStorageUtils.generateAuthHeader(storage.getUsername(), storage.getPassword()); + OntapResponse response = nasFeignClient.getFileResponse(authHeader, volumeUuid, filePath); + logger.debug("getFileInfo: Retrieved file info for: {} in volume: {}", filePath, volumeUuid); + return response; + } 
catch (FeignException e){ + if (e.status() == 404) { + logger.debug("getFileInfo: File not found: {} in volume: {}", filePath, volumeUuid); + return null; + } + logger.error("getFileInfo: Failed to get file info: {} in volume: {}", filePath, volumeUuid, e); + throw new CloudRuntimeException("Failed to get file info: " + e.getMessage()); + } catch (Exception e){ + logger.error("getFileInfo: Exception while getting file info: {} in volume: {}", filePath, volumeUuid, e); + throw new CloudRuntimeException("Failed to get file info: " + e.getMessage()); + } + } + + private boolean updateFile(String volumeUuid, String filePath, FileInfo fileInfo) { + logger.info("updateFile: Updating file: {} in volume: {}", filePath, volumeUuid); + try { + String authHeader = OntapStorageUtils.generateAuthHeader(storage.getUsername(), storage.getPassword()); + nasFeignClient.updateFile( authHeader, volumeUuid, filePath, fileInfo); + logger.info("updateFile: File updated successfully: {} in volume: {}", filePath, volumeUuid); + return true; + } catch (FeignException e) { + logger.error("updateFile: Failed to update file: {} in volume: {}", filePath, volumeUuid, e); + return false; + } catch (Exception e){ + logger.error("updateFile: Exception while updating file: {} in volume: {}", filePath, volumeUuid, e); + return false; + } + } + + private ExportPolicy createExportPolicyRequest(AccessGroup accessGroup,String svmName , String volumeName){ String exportPolicyName = OntapStorageUtils.generateExportPolicyName(svmName,volumeName); @@ -280,13 +382,13 @@ private ExportPolicy createExportPolicyRequest(AccessGroup accessGroup,String sv String ip = (hostStorageIp != null && !hostStorageIp.isEmpty()) ? 
hostStorageIp : host.getPrivateIpAddress(); - String ipToUse = ip + "/31"; + String ipToUse = ip + "/32"; ExportRule.ExportClient exportClient = new ExportRule.ExportClient(); exportClient.setMatch(ipToUse); exportClients.add(exportClient); } exportRule.setClients(exportClients); - exportRule.setProtocols(List.of(ExportRule.ProtocolsEnum.ANY)); + exportRule.setProtocols(List.of(ExportRule.ProtocolsEnum.NFS3)); exportRule.setRoRule(List.of("sys")); exportRule.setRwRule(List.of("sys")); exportRule.setSuperuser(List.of("sys")); @@ -300,4 +402,153 @@ private ExportPolicy createExportPolicyRequest(AccessGroup accessGroup,String sv return exportPolicy; } + + private String updateCloudStackVolumeMetadata(String dataStoreId, DataObject volumeInfo) { + logger.info("updateCloudStackVolumeMetadata called with datastoreID: {} volumeInfo: {} ", dataStoreId, volumeInfo ); + try { + VolumeObject volumeObject = (VolumeObject) volumeInfo; + long volumeId = volumeObject.getId(); + logger.info("updateCloudStackVolumeMetadata: VolumeInfo ID from VolumeObject: {}", volumeId); + VolumeVO volume = volumeDao.findById(volumeId); + if (volume == null) { + throw new CloudRuntimeException("Volume not found with id: " + volumeId); + } + String volumeUuid = volumeInfo.getUuid(); + volume.setPoolType(Storage.StoragePoolType.NetworkFilesystem); + volume.setPoolId(Long.parseLong(dataStoreId)); + volume.setPath(volumeUuid); // Filename for qcow2 file + volumeDao.update(volume.getId(), volume); + logger.info("Updated volume path to {} for volume ID {}", volumeUuid, volumeId); + return volumeUuid; + }catch (Exception e){ + logger.error("updateCloudStackVolumeMetadata: Exception while updating volumeInfo: {} in volume: {}", dataStoreId, volumeInfo.getUuid(), e); + throw new CloudRuntimeException("Exception while updating volumeInfo: " + e.getMessage()); + } + } + + private Answer createVolumeOnKVMHost(DataObject volumeInfo) { + logger.info("createVolumeOnKVMHost called with volumeInfo: {} ", 
volumeInfo); + + try { + logger.info("createVolumeOnKVMHost: Sending CreateObjectCommand to KVM agent for volume: {}", volumeInfo.getUuid()); + CreateObjectCommand cmd = new CreateObjectCommand(volumeInfo.getTO()); + EndPoint ep = epSelector.select(volumeInfo); + if (ep == null) { + String errMsg = "No remote endpoint to send CreateObjectCommand, check if host is up"; + logger.error(errMsg); + return new Answer(cmd, false, errMsg); + } + logger.info("createVolumeOnKVMHost: Sending command to endpoint: {}", ep.getHostAddr()); + Answer answer = ep.sendMessage(cmd); + if (answer != null && answer.getResult()) { + logger.info("createVolumeOnKVMHost: Successfully created qcow2 file on KVM host"); + } else { + logger.error("createVolumeOnKVMHost: Failed to create qcow2 file: {}", + answer != null ? answer.getDetails() : "null answer"); + } + return answer; + } catch (Exception e) { + logger.error("createVolumeOnKVMHost: Exception sending CreateObjectCommand", e); + return new Answer(null, false, e.toString()); + } + } + + private Answer deleteVolumeOnKVMHost(DataObject volumeInfo) { + logger.info("deleteVolumeOnKVMHost called with volumeInfo: {} ", volumeInfo); + + try { + logger.info("deleteVolumeOnKVMHost: Sending DeleteCommand to KVM agent for volume: {}", volumeInfo.getUuid()); + DeleteCommand cmd = new DeleteCommand(volumeInfo.getTO()); + EndPoint ep = epSelector.select(volumeInfo); + if (ep == null) { + String errMsg = "No remote endpoint to send DeleteCommand, check if host is up"; + logger.error(errMsg); + return new Answer(cmd, false, errMsg); + } + logger.info("deleteVolumeOnKVMHost: Sending command to endpoint: {}", ep.getHostAddr()); + Answer answer = ep.sendMessage(cmd); + if (answer != null && answer.getResult()) { + logger.info("deleteVolumeOnKVMHost: Successfully deleted qcow2 file on KVM host"); + } else { + logger.error("deleteVolumeOnKVMHost: Failed to delete qcow2 file: {}", + answer != null ? 
answer.getDetails() : "null answer"); + } + return answer; + } catch (Exception e) { + logger.error("deleteVolumeOnKVMHost: Exception sending DeleteCommand", e); + return new Answer(null, false, e.toString()); + } + } + + private FileInfo getFile(String volumeUuid, String filePath) { + logger.info("Get File: {} for volume: {}", filePath, volumeUuid); + + String authHeader = OntapStorageUtils.generateAuthHeader(storage.getUsername(), storage.getPassword()); + OntapResponse fileResponse = null; + try { + fileResponse = nasFeignClient.getFileResponse(authHeader, volumeUuid, filePath); + if (fileResponse == null || fileResponse.getRecords().isEmpty()) { + throw new CloudRuntimeException("File " + filePath + " not found on ONTAP. " + + "Received successful response but file does not exist."); + } + } catch (FeignException e) { + logger.error("getFile: Failed to get file response: " + filePath, e); + throw new CloudRuntimeException("File not found: " + e.getMessage()); + } catch (Exception e) { + logger.error("getFile: Exception to get file: {}", filePath, e); + throw new CloudRuntimeException("Failed to get the file: " + e.getMessage()); + } + logger.info("getFile: File retrieved successfully with name {}", filePath); + return fileResponse.getRecords().get(0); + } + + /** + * Reverts a file to a snapshot using the ONTAP CLI-based snapshot file restore API. + * + *

ONTAP REST API (CLI passthrough): + * {@code POST /api/private/cli/volume/snapshot/restore-file}

+ * + *

This method uses the CLI native API which is more reliable and works + * consistently for both NFS files and iSCSI LUNs.

+ * + * @param snapshotName The ONTAP FlexVolume snapshot name + * @param flexVolUuid The FlexVolume UUID (not used in CLI API, kept for interface consistency) + * @param snapshotUuid The ONTAP snapshot UUID (not used in CLI API, kept for interface consistency) + * @param volumePath The file path within the FlexVolume + * @param lunUuid Not used for NFS (null) + * @param flexVolName The FlexVolume name (required for CLI API) + * @return JobResponse for the async restore operation + */ + @Override + public JobResponse revertSnapshotForCloudStackVolume(String snapshotName, String flexVolUuid, + String snapshotUuid, String volumePath, + String lunUuid, String flexVolName) { + logger.info("revertSnapshotForCloudStackVolume [NFS]: Restoring file [{}] from snapshot [{}] on FlexVol [{}]", + volumePath, snapshotName, flexVolName); + + if (snapshotName == null || snapshotName.isEmpty()) { + throw new CloudRuntimeException("Snapshot name is required for NFS snapshot revert"); + } + if (volumePath == null || volumePath.isEmpty()) { + throw new CloudRuntimeException("File path is required for NFS snapshot revert"); + } + if (flexVolName == null || flexVolName.isEmpty()) { + throw new CloudRuntimeException("FlexVolume name is required for NFS snapshot revert"); + } + + String authHeader = getAuthHeader(); + String svmName = storage.getSvmName(); + + // Prepare the file path for ONTAP CLI API (ensure it starts with "/") + String ontapFilePath = volumePath.startsWith("/") ? 
volumePath : "/" + volumePath; + + // Create CLI snapshot restore request + CliSnapshotRestoreRequest restoreRequest = new CliSnapshotRestoreRequest( + svmName, flexVolName, snapshotName, ontapFilePath); + + logger.info("revertSnapshotForCloudStackVolume: Calling CLI file restore API with vserver={}, volume={}, snapshot={}, path={}", + svmName, flexVolName, snapshotName, ontapFilePath); + + return getSnapshotFeignClient().restoreFileFromSnapshotCli(authHeader, restoreRequest); + } } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java index 9814f3b9a93c..af7410be10c4 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java @@ -20,24 +20,27 @@ package org.apache.cloudstack.storage.service; import com.cloud.host.HostVO; -import com.cloud.hypervisor.Hypervisor; import com.cloud.utils.exception.CloudRuntimeException; -import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; -import org.apache.cloudstack.storage.feign.FeignClientFactory; -import org.apache.cloudstack.storage.feign.client.SANFeignClient; +import feign.FeignException; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.storage.feign.model.Igroup; import org.apache.cloudstack.storage.feign.model.Initiator; import org.apache.cloudstack.storage.feign.model.Svm; import org.apache.cloudstack.storage.feign.model.OntapStorage; +import org.apache.cloudstack.storage.feign.model.Lun; +import org.apache.cloudstack.storage.feign.model.LunMap; +import org.apache.cloudstack.storage.feign.model.CliSnapshotRestoreRequest; +import org.apache.cloudstack.storage.feign.model.response.JobResponse; import 
org.apache.cloudstack.storage.feign.model.response.OntapResponse; import org.apache.cloudstack.storage.service.model.AccessGroup; import org.apache.cloudstack.storage.service.model.CloudStackVolume; import org.apache.cloudstack.storage.service.model.ProtocolType; import org.apache.cloudstack.storage.utils.OntapStorageConstants; import org.apache.cloudstack.storage.utils.OntapStorageUtils; +import org.apache.commons.collections.CollectionUtils; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; - +import javax.inject.Inject; import java.util.ArrayList; import java.util.List; import java.util.Map; @@ -45,14 +48,12 @@ public class UnifiedSANStrategy extends SANStrategy { private static final Logger logger = LogManager.getLogger(UnifiedSANStrategy.class); - private final FeignClientFactory feignClientFactory; - private final SANFeignClient sanFeignClient; + @Inject + private StoragePoolDetailsDao storagePoolDetailsDao; public UnifiedSANStrategy(OntapStorage ontapStorage) { super(ontapStorage); - String baseURL = OntapStorageConstants.HTTPS + ontapStorage.getManagementLIF(); - this.feignClientFactory = new FeignClientFactory(); - this.sanFeignClient = feignClientFactory.createClient(SANFeignClient.class, baseURL); + String baseURL = OntapStorageConstants.HTTPS + ontapStorage.getStorageIP(); } public void setOntapStorage(OntapStorage ontapStorage) { @@ -61,7 +62,36 @@ public void setOntapStorage(OntapStorage ontapStorage) { @Override public CloudStackVolume createCloudStackVolume(CloudStackVolume cloudstackVolume) { - return null; + logger.info("createCloudStackVolume : Creating Lun with cloudstackVolume request {} ", cloudstackVolume); + if (cloudstackVolume == null || cloudstackVolume.getLun() == null) { + logger.error("createCloudStackVolume: LUN creation failed. 
Invalid request: {}", cloudstackVolume); + throw new CloudRuntimeException(" Failed to create Lun, invalid request"); + } + try { + // Get AuthHeader + String authHeader = OntapStorageUtils.generateAuthHeader(storage.getUsername(), storage.getPassword()); + // Create URI for lun creation + //TODO: It is possible that Lun creation will take time and we may need to handle through async job. + OntapResponse createdLun = sanFeignClient.createLun(authHeader, true, cloudstackVolume.getLun()); + if (createdLun == null || createdLun.getRecords() == null || createdLun.getRecords().size() == 0) { + logger.error("createCloudStackVolume: LUN creation failed for Lun {}", cloudstackVolume.getLun().getName()); + throw new CloudRuntimeException("Failed to create Lun: " + cloudstackVolume.getLun().getName()); + } + Lun lun = createdLun.getRecords().get(0); + logger.debug("createCloudStackVolume: LUN created successfully. Lun: {}", lun); + logger.info("createCloudStackVolume: LUN created successfully. LunName: {}", lun.getName()); + + CloudStackVolume createdCloudStackVolume = new CloudStackVolume(); + createdCloudStackVolume.setLun(lun); + return createdCloudStackVolume; + } catch (FeignException e) { + logger.error("FeignException occurred while creating LUN: {}, Status: {}, Exception: {}", + cloudstackVolume.getLun().getName(), e.status(), e.getMessage()); + throw new CloudRuntimeException("Failed to create Lun: " + e.getMessage()); + } catch (Exception e) { + logger.error("Exception occurred while creating LUN: {}, Exception: {}", cloudstackVolume.getLun().getName(), e.getMessage()); + throw new CloudRuntimeException("Failed to create Lun: " + e.getMessage()); + } } @Override @@ -70,47 +100,104 @@ CloudStackVolume updateCloudStackVolume(CloudStackVolume cloudstackVolume) { } @Override - public void deleteCloudStackVolume(CloudStackVolume cloudstackVolume) {} + public void deleteCloudStackVolume(CloudStackVolume cloudstackVolume) { + if (cloudstackVolume == null || 
cloudstackVolume.getLun() == null) { + logger.error("deleteCloudStackVolume: Lun deletion failed. Invalid request: {}", cloudstackVolume); + throw new CloudRuntimeException(" Failed to delete Lun, invalid request"); + } + logger.info("deleteCloudStackVolume : Deleting Lun: {}", cloudstackVolume.getLun().getName()); + try { + String authHeader = OntapStorageUtils.generateAuthHeader(storage.getUsername(), storage.getPassword()); + Map queryParams = Map.of("allow_delete_while_mapped", "true"); + try { + sanFeignClient.deleteLun(authHeader, cloudstackVolume.getLun().getUuid(), queryParams); + } catch (FeignException feignEx) { + if (feignEx.status() == 404) { + logger.warn("deleteCloudStackVolume: Lun {} does not exist (status 404), skipping deletion", cloudstackVolume.getLun().getName()); + return; + } + throw feignEx; + } + logger.info("deleteCloudStackVolume: Lun deleted successfully. LunName: {}", cloudstackVolume.getLun().getName()); + } catch (Exception e) { + logger.error("Exception occurred while deleting Lun: {}, Exception: {}", cloudstackVolume.getLun().getName(), e.getMessage()); + throw new CloudRuntimeException("Failed to delete Lun: " + e.getMessage()); + } + } @Override public void copyCloudStackVolume(CloudStackVolume cloudstackVolume) {} @Override public CloudStackVolume getCloudStackVolume(Map values) { - return null; + logger.info("getCloudStackVolume : fetching Lun"); + logger.debug("getCloudStackVolume : fetching Lun with params {} ", values); + if (values == null || values.isEmpty()) { + logger.error("getCloudStackVolume: get Lun failed. Invalid request: {}", values); + throw new CloudRuntimeException(" get Lun Failed, invalid request"); + } + String svmName = values.get(OntapStorageConstants.SVM_DOT_NAME); + String lunName = values.get(OntapStorageConstants.NAME); + if (svmName == null || lunName == null || svmName.isEmpty() || lunName.isEmpty()) { + logger.error("getCloudStackVolume: get Lun failed. 
Invalid svm:{} or Lun name: {}", svmName, lunName); + throw new CloudRuntimeException("Failed to get Lun, invalid request"); + } + try { + String authHeader = OntapStorageUtils.generateAuthHeader(storage.getUsername(), storage.getPassword()); + Map queryParams = Map.of(OntapStorageConstants.SVM_DOT_NAME, svmName, OntapStorageConstants.NAME, lunName); + OntapResponse lunResponse = sanFeignClient.getLunResponse(authHeader, queryParams); + if (lunResponse == null || lunResponse.getRecords() == null || lunResponse.getRecords().isEmpty()) { + logger.warn("getCloudStackVolume: Lun '{}' on SVM '{}' not found. Returning null.", lunName, svmName); + return null; + } + Lun lun = lunResponse.getRecords().get(0); + logger.debug("getCloudStackVolume: Lun Details : {}", lun); + logger.info("getCloudStackVolume: Fetched the Lun successfully. LunName: {}", lun.getName()); + + CloudStackVolume cloudStackVolume = new CloudStackVolume(); + cloudStackVolume.setLun(lun); + return cloudStackVolume; + } catch (FeignException e) { + if (e.status() == 404) { + logger.warn("getCloudStackVolume: Lun '{}' on SVM '{}' not found (status 404). Returning null.", lunName, svmName); + return null; + } + logger.error("FeignException occurred while fetching Lun, Status: {}, Exception: {}", e.status(), e.getMessage()); + throw new CloudRuntimeException("Failed to fetch Lun details: " + e.getMessage()); + } catch (Exception e) { + logger.error("Exception occurred while fetching Lun, Exception: {}", e.getMessage()); + throw new CloudRuntimeException("Failed to fetch Lun details: " + e.getMessage()); + } } @Override public AccessGroup createAccessGroup(AccessGroup accessGroup) { - logger.info("createAccessGroup : Create Igroup"); - String igroupName = "unknown"; logger.debug("createAccessGroup : Creating Igroup with access group request {} ", accessGroup); if (accessGroup == null) { logger.error("createAccessGroup: Igroup creation failed. 
Invalid request: {}", accessGroup); - throw new CloudRuntimeException("createAccessGroup : Failed to create Igroup, invalid request"); + throw new CloudRuntimeException(" Failed to create Igroup, invalid request"); } + // Get StoragePool details + if (accessGroup.getStoragePoolId() == null) { + throw new CloudRuntimeException(" Failed to create Igroup, invalid datastore details in the request"); + } + if (accessGroup.getHostsToConnect() == null || accessGroup.getHostsToConnect().isEmpty()) { + throw new CloudRuntimeException(" Failed to create Igroup, no hosts to connect provided in the request"); + } + + String igroupName = null; try { - if (accessGroup.getPrimaryDataStoreInfo() == null || accessGroup.getPrimaryDataStoreInfo().getDetails() == null - || accessGroup.getPrimaryDataStoreInfo().getDetails().isEmpty()) { - throw new CloudRuntimeException("createAccessGroup : Failed to create Igroup, invalid datastore details in the request"); - } - Map dataStoreDetails = accessGroup.getPrimaryDataStoreInfo().getDetails(); + Map dataStoreDetails = storagePoolDetailsDao.listDetailsKeyPairs(accessGroup.getStoragePoolId()); logger.debug("createAccessGroup: Successfully fetched datastore details."); - String authHeader = OntapStorageUtils.generateAuthHeader(storage.getUsername(), storage.getPassword()); - + // Generate Igroup request Igroup igroupRequest = new Igroup(); - List hostsIdentifier = new ArrayList<>(); String svmName = dataStoreDetails.get(OntapStorageConstants.SVM_NAME); - igroupName = OntapStorageUtils.getIgroupName(svmName, accessGroup.getScope().getScopeType(), accessGroup.getScope().getScopeId()); - Hypervisor.HypervisorType hypervisorType = accessGroup.getPrimaryDataStoreInfo().getHypervisor(); - ProtocolType protocol = ProtocolType.valueOf(dataStoreDetails.get(OntapStorageConstants.PROTOCOL)); - if (accessGroup.getHostsToConnect() == null || accessGroup.getHostsToConnect().isEmpty()) { - throw new CloudRuntimeException("createAccessGroup : Failed to create 
Igroup, no hosts to connect provided in the request"); - } - if (!validateProtocolSupportAndFetchHostsIdentifier(accessGroup.getHostsToConnect(), protocol, hostsIdentifier)) { - String errMsg = "createAccessGroup: Not all hosts in the " + accessGroup.getScope().getScopeType().toString() + " support the protocol: " + protocol.name(); + + // Check if all hosts support the protocol + if (!validateProtocolSupport(accessGroup.getHostsToConnect(), protocol)) { + String errMsg = " Not all hosts " + " support the protocol: " + protocol.name(); throw new CloudRuntimeException(errMsg); } @@ -119,41 +206,43 @@ public AccessGroup createAccessGroup(AccessGroup accessGroup) { svm.setName(svmName); igroupRequest.setSvm(svm); } + // TODO: Defaulting to LINUX for zone scope for now, this has to be revisited when we support other hypervisors + igroupRequest.setOsType(Igroup.OsTypeEnum.Linux); - if (igroupName != null && !igroupName.isEmpty()) { + for (HostVO host : accessGroup.getHostsToConnect()) { + igroupName = OntapStorageUtils.getIgroupName(svmName, host.getName()); igroupRequest.setName(igroupName); - } - - igroupRequest.setOsType(Igroup.OsTypeEnum.Linux); - if (hostsIdentifier != null && hostsIdentifier.size() > 0) { List initiators = new ArrayList<>(); - for (String hostIdentifier : hostsIdentifier) { - Initiator initiator = new Initiator(); - initiator.setName(hostIdentifier); - initiators.add(initiator); - } + Initiator initiator = new Initiator(); + initiator.setName(host.getStorageUrl());// CloudStack has one iqn for one host + initiators.add(initiator); igroupRequest.setInitiators(initiators); + igroupRequest.setDeleteOnUnmap(true); + igroupRequest.setDeleteOnUnmap(true); } - igroupRequest.setProtocol(Igroup.ProtocolEnum.valueOf("iscsi")); + igroupRequest.setProtocol(Igroup.ProtocolEnum.valueOf(OntapStorageConstants.ISCSI)); + // Create Igroup logger.debug("createAccessGroup: About to call sanFeignClient.createIgroup with igroupName: {}", igroupName); AccessGroup 
createdAccessGroup = new AccessGroup(); OntapResponse createdIgroup = null; try { + // Get AuthHeader + String authHeader = OntapStorageUtils.generateAuthHeader(storage.getUsername(), storage.getPassword()); createdIgroup = sanFeignClient.createIgroup(authHeader, true, igroupRequest); - } catch (Exception feignEx) { - String errMsg = feignEx.getMessage(); - if (errMsg != null && errMsg.contains(("5374023"))) { - logger.warn("createAccessGroup: Igroup with name {} already exists. Fetching existing Igroup.", igroupName); + } catch (FeignException feignEx) { + if (feignEx.status() == 409) { + logger.warn("createAccessGroup: Igroup with name {} already exists (status 409). Fetching existing Igroup.", igroupName); + // TODO: Currently we aren't doing anything with the returned AccessGroup object, so, haven't added code here to fetch the existing Igroup and set it in AccessGroup. return createdAccessGroup; } - logger.error("createAccessGroup: Exception during Feign call: {}", feignEx.getMessage(), feignEx); + logger.error("createAccessGroup: FeignException during Igroup creation: Status: {}, Exception: {}", feignEx.status(), feignEx.getMessage(), feignEx); throw feignEx; } logger.debug("createAccessGroup: createdIgroup: {}", createdIgroup); logger.debug("createAccessGroup: createdIgroup Records: {}", createdIgroup.getRecords()); - if (createdIgroup == null || createdIgroup.getRecords() == null || createdIgroup.getRecords().isEmpty()) { + if (createdIgroup.getRecords() == null || createdIgroup.getRecords().isEmpty()) { logger.error("createAccessGroup: Igroup creation failed for Igroup Name {}", igroupName); throw new CloudRuntimeException("Failed to create Igroup: " + igroupName); } @@ -175,82 +264,77 @@ public void deleteAccessGroup(AccessGroup accessGroup) { logger.info("deleteAccessGroup: Deleting iGroup"); if (accessGroup == null) { - throw new CloudRuntimeException("deleteAccessGroup: Invalid accessGroup object - accessGroup is null"); + 
logger.error("deleteAccessGroup: Igroup deletion failed. Invalid request: {}", accessGroup); + throw new CloudRuntimeException(" Failed to delete Igroup, invalid request"); } - - PrimaryDataStoreInfo primaryDataStoreInfo = accessGroup.getPrimaryDataStoreInfo(); - if (primaryDataStoreInfo == null) { - throw new CloudRuntimeException("deleteAccessGroup: PrimaryDataStoreInfo is null in accessGroup"); + // Get StoragePool details + if (accessGroup.getStoragePoolId() == null) { + throw new CloudRuntimeException(" Failed to delete Igroup, invalid datastore details in the request"); } - try { String authHeader = OntapStorageUtils.generateAuthHeader(storage.getUsername(), storage.getPassword()); - String svmName = storage.getSvmName(); - - String igroupName; - if (primaryDataStoreInfo.getClusterId() != null) { - igroupName = OntapStorageUtils.getIgroupName(svmName, com.cloud.storage.ScopeType.CLUSTER, primaryDataStoreInfo.getClusterId()); - logger.info("deleteAccessGroup: Deleting cluster-scoped iGroup '{}'", igroupName); - } else { - igroupName = OntapStorageUtils.getIgroupName(svmName, com.cloud.storage.ScopeType.ZONE, primaryDataStoreInfo.getDataCenterId()); - logger.info("deleteAccessGroup: Deleting zone-scoped iGroup '{}'", igroupName); - } - - Map igroupParams = Map.of( - OntapStorageConstants.SVM_DOT_NAME, svmName, - OntapStorageConstants.NAME, igroupName - ); - - try { - OntapResponse igroupResponse = sanFeignClient.getIgroupResponse(authHeader, igroupParams); - if (igroupResponse == null || igroupResponse.getRecords() == null || igroupResponse.getRecords().isEmpty()) { - logger.warn("deleteAccessGroup: iGroup '{}' not found, may have been already deleted", igroupName); - return; - } - - Igroup igroup = igroupResponse.getRecords().get(0); - String igroupUuid = igroup.getUuid(); - - if (igroupUuid == null || igroupUuid.isEmpty()) { - throw new CloudRuntimeException("deleteAccessGroup: iGroup UUID is null or empty for iGroup: " + igroupName); - } - - 
logger.info("deleteAccessGroup: Deleting iGroup '{}' with UUID '{}'", igroupName, igroupUuid); - - sanFeignClient.deleteIgroup(authHeader, igroupUuid); - - logger.info("deleteAccessGroup: Successfully deleted iGroup '{}'", igroupName); - - } catch (Exception e) { - String errorMsg = e.getMessage(); - if (errorMsg != null && (errorMsg.contains("5374852") || errorMsg.contains("not found"))) { - logger.warn("deleteAccessGroup: iGroup '{}' does not exist, skipping deletion", igroupName); - } else { - throw e; + //Get iGroup name per host + if(!CollectionUtils.isEmpty(accessGroup.getHostsToConnect())) { + for (HostVO host : accessGroup.getHostsToConnect()) { + String igroupName = OntapStorageUtils.getIgroupName(svmName, host.getName()); + logger.info("deleteAccessGroup: iGroup name '{}'", igroupName); + + // Get the iGroup to retrieve its UUID + Map igroupParams = Map.of( + OntapStorageConstants.SVM_DOT_NAME, svmName, + OntapStorageConstants.NAME, igroupName + ); + + try { + OntapResponse igroupResponse = sanFeignClient.getIgroupResponse(authHeader, igroupParams); + if (igroupResponse == null || igroupResponse.getRecords() == null || igroupResponse.getRecords().isEmpty()) { + logger.warn("deleteAccessGroup: iGroup '{}' not found, may have been already deleted", igroupName); + return; + } + + Igroup igroup = igroupResponse.getRecords().get(0); + String igroupUuid = igroup.getUuid(); + + if (igroupUuid == null || igroupUuid.isEmpty()) { + throw new CloudRuntimeException(" iGroup UUID is null or empty for iGroup: " + igroupName); + } + + logger.info("deleteAccessGroup: Deleting iGroup '{}' with UUID '{}'", igroupName, igroupUuid); + + // Delete the iGroup using the UUID + sanFeignClient.deleteIgroup(authHeader, igroupUuid); + + logger.info("deleteAccessGroup: Successfully deleted iGroup '{}'", igroupName); + + } catch (FeignException e) { + if (e.status() == 404) { + logger.warn("deleteAccessGroup: iGroup '{}' does not exist (status 404), skipping deletion", igroupName); + 
} else { + logger.error("deleteAccessGroup: FeignException occurred: Status: {}, Exception: {}", e.status(), e.getMessage(), e); + throw e; + } + } catch (Exception e) { + logger.error("deleteAccessGroup: Exception occurred: {}", e.getMessage(), e); + throw e; + } } } - + } catch (FeignException e) { + logger.error("deleteAccessGroup: FeignException occurred while deleting iGroup. Status: {}, Exception: {}", e.status(), e.getMessage(), e); + throw new CloudRuntimeException("Failed to delete iGroup: " + e.getMessage(), e); } catch (Exception e) { logger.error("deleteAccessGroup: Failed to delete iGroup. Exception: {}", e.getMessage(), e); throw new CloudRuntimeException("Failed to delete iGroup: " + e.getMessage(), e); } } - private boolean validateProtocolSupportAndFetchHostsIdentifier(List hosts, ProtocolType protocolType, List hostIdentifiers) { - switch (protocolType) { - case ISCSI: - String protocolPrefix = OntapStorageConstants.IQN; - for (HostVO host : hosts) { - if (host == null || host.getStorageUrl() == null || host.getStorageUrl().trim().isEmpty() - || !host.getStorageUrl().startsWith(protocolPrefix)) { - return false; - } - hostIdentifiers.add(host.getStorageUrl()); - } - break; - default: - throw new CloudRuntimeException("validateProtocolSupportAndFetchHostsIdentifier : Unsupported protocol: " + protocolType.name()); + private boolean validateProtocolSupport(List hosts, ProtocolType protocolType) { + String protocolPrefix = OntapStorageConstants.IQN; + for (HostVO host : hosts) { + if (host == null || host.getStorageUrl() == null || host.getStorageUrl().trim().isEmpty() || !host.getStorageUrl().startsWith(protocolPrefix)) { + return false; + } } logger.info("validateProtocolSupportAndFetchHostsIdentifier: All hosts support the protocol: " + protocolType.name()); return true; @@ -261,18 +345,19 @@ public AccessGroup updateAccessGroup(AccessGroup accessGroup) { return null; } + @Override public AccessGroup getAccessGroup(Map values) { 
logger.info("getAccessGroup : fetch Igroup"); logger.debug("getAccessGroup : fetching Igroup with params {} ", values); if (values == null || values.isEmpty()) { logger.error("getAccessGroup: get Igroup failed. Invalid request: {}", values); - throw new CloudRuntimeException("getAccessGroup : get Igroup Failed, invalid request"); + throw new CloudRuntimeException(" get Igroup Failed, invalid request"); } String svmName = values.get(OntapStorageConstants.SVM_DOT_NAME); String igroupName = values.get(OntapStorageConstants.NAME); if (svmName == null || igroupName == null || svmName.isEmpty() || igroupName.isEmpty()) { logger.error("getAccessGroup: get Igroup failed. Invalid svm:{} or igroup name: {}", svmName, igroupName); - throw new CloudRuntimeException("getAccessGroup : Failed to get Igroup, invalid request"); + throw new CloudRuntimeException(" Failed to get Igroup, invalid request"); } try { String authHeader = OntapStorageUtils.generateAuthHeader(storage.getUsername(), storage.getPassword()); @@ -286,24 +371,229 @@ public AccessGroup getAccessGroup(Map values) { AccessGroup accessGroup = new AccessGroup(); accessGroup.setIgroup(igroup); return accessGroup; - } catch (Exception e) { - String errMsg = e.getMessage(); - if (errMsg != null && errMsg.contains("not found")) { - logger.warn("getAccessGroup: Igroup '{}' not found on SVM '{}' ({}). Returning null.", igroupName, svmName, errMsg); + } catch (FeignException e) { + if (e.status() == 404) { + logger.warn("getAccessGroup: Igroup '{}' not found on SVM '{}' (status 404). 
Returning null.", igroupName, svmName); return null; } - logger.error("Exception occurred while fetching Igroup, Exception: {}", errMsg); - throw new CloudRuntimeException("Failed to fetch Igroup details: " + errMsg); + logger.error("FeignException occurred while fetching Igroup, Status: {}, Exception: {}", e.status(), e.getMessage()); + throw new CloudRuntimeException("Failed to fetch Igroup details: " + e.getMessage()); + } catch (Exception e) { + logger.error("Exception occurred while fetching Igroup, Exception: {}", e.getMessage()); + throw new CloudRuntimeException("Failed to fetch Igroup details: " + e.getMessage()); } } public Map enableLogicalAccess(Map values) { - return null; + logger.info("enableLogicalAccess : Create LunMap"); + logger.debug("enableLogicalAccess : Creating LunMap with values {} ", values); + Map response = null; + if (values == null) { + logger.error("enableLogicalAccess: LunMap creation failed. Invalid request values: null"); + throw new CloudRuntimeException(" Failed to create LunMap, invalid request"); + } + String svmName = values.get(OntapStorageConstants.SVM_DOT_NAME); + String lunName = values.get(OntapStorageConstants.LUN_DOT_NAME); + String igroupName = values.get(OntapStorageConstants.IGROUP_DOT_NAME); + if (svmName == null || lunName == null || igroupName == null || svmName.isEmpty() || lunName.isEmpty() || igroupName.isEmpty()) { + logger.error("enableLogicalAccess: LunMap creation failed. 
Invalid request values: {}", values); + throw new CloudRuntimeException(" Failed to create LunMap, invalid request"); + } + try { + // Get AuthHeader + String authHeader = OntapStorageUtils.generateAuthHeader(storage.getUsername(), storage.getPassword()); + // Create LunMap + LunMap lunMapRequest = new LunMap(); + Svm svm = new Svm(); + svm.setName(svmName); + lunMapRequest.setSvm(svm); + //Set Lun name + Lun lun = new Lun(); + lun.setName(lunName); + lunMapRequest.setLun(lun); + //Set Igroup name + Igroup igroup = new Igroup(); + igroup.setName(igroupName); + lunMapRequest.setIgroup(igroup); + try { + sanFeignClient.createLunMap(authHeader, true, lunMapRequest); + } catch (Exception feignEx) { + String errMsg = feignEx.getMessage(); + if (errMsg != null && errMsg.contains(("LUN already mapped to this group"))) { + logger.warn("enableLogicalAccess: LunMap for Lun: {} and igroup: {} already exists.", lunName, igroupName); + } else { + logger.error("enableLogicalAccess: Exception during Feign call: {}", feignEx.getMessage(), feignEx); + throw feignEx; + } + } + // Get the LunMap details + OntapResponse lunMapResponse = null; + try { + lunMapResponse = sanFeignClient.getLunMapResponse(authHeader, + Map.of( + OntapStorageConstants.SVM_DOT_NAME, svmName, + OntapStorageConstants.LUN_DOT_NAME, lunName, + OntapStorageConstants.IGROUP_DOT_NAME, igroupName, + OntapStorageConstants.FIELDS, OntapStorageConstants.LOGICAL_UNIT_NUMBER + )); + response = Map.of( + OntapStorageConstants.LOGICAL_UNIT_NUMBER, lunMapResponse.getRecords().get(0).getLogicalUnitNumber().toString() + ); + } catch (Exception e) { + logger.error("enableLogicalAccess: Failed to fetch LunMap details for Lun: {} and igroup: {}, Exception: {}", lunName, igroupName, e); + throw new CloudRuntimeException("Failed to fetch LunMap details for Lun: " + lunName + " and igroup: " + igroupName); + } + logger.debug("enableLogicalAccess: LunMap created successfully, LunMap: {}", lunMapResponse.getRecords().get(0)); + 
logger.info("enableLogicalAccess: LunMap created successfully."); + } catch (Exception e) { + logger.error("Exception occurred while creating LunMap", e); + throw new CloudRuntimeException("Failed to create LunMap: " + e.getMessage()); + } + return response; } - public void disableLogicalAccess(Map values) {} + public void disableLogicalAccess(Map values) { + logger.info("disableLogicalAccess : Delete LunMap"); + logger.debug("disableLogicalAccess : Deleting LunMap with values {} ", values); + if (values == null) { + logger.error("disableLogicalAccess: LunMap deletion failed. Invalid request values: null"); + throw new CloudRuntimeException(" Failed to delete LunMap, invalid request"); + } + String lunUUID = values.get(OntapStorageConstants.LUN_DOT_UUID); + String igroupUUID = values.get(OntapStorageConstants.IGROUP_DOT_UUID); + if (lunUUID == null || igroupUUID == null || lunUUID.isEmpty() || igroupUUID.isEmpty()) { + logger.error("disableLogicalAccess: LunMap deletion failed. Invalid request values: {}", values); + throw new CloudRuntimeException(" Failed to delete LunMap, invalid request"); + } + try { + String authHeader = OntapStorageUtils.generateAuthHeader(storage.getUsername(), storage.getPassword()); + sanFeignClient.deleteLunMap(authHeader, lunUUID, igroupUUID); + logger.info("disableLogicalAccess: LunMap deleted successfully."); + } catch (FeignException e) { + if (e.status() == 404) { + logger.warn("disableLogicalAccess: LunMap with Lun UUID: {} and igroup UUID: {} does not exist, skipping deletion", lunUUID, igroupUUID); + return; + } + logger.error("FeignException occurred while deleting LunMap, Status: {}, Exception: {}", e.status(), e.getMessage()); + throw new CloudRuntimeException("Failed to delete LunMap: " + e.getMessage()); + } catch (Exception e) { + logger.error("Exception occurred while deleting LunMap, Exception: {}", e.getMessage()); + throw new CloudRuntimeException("Failed to delete LunMap: " + e.getMessage()); + } + } + // GET-only 
helper: fetch LUN-map and return logical unit number if it exists; otherwise return null public Map getLogicalAccess(Map values) { + logger.info("getLogicalAccess : Fetch LunMap"); + logger.debug("getLogicalAccess : Fetching LunMap with values {} ", values); + if (values == null) { + logger.error("getLogicalAccess: Invalid request values: null"); + throw new CloudRuntimeException(" Invalid request"); + } + String svmName = values.get(OntapStorageConstants.SVM_DOT_NAME); + String lunName = values.get(OntapStorageConstants.LUN_DOT_NAME); + String igroupName = values.get(OntapStorageConstants.IGROUP_DOT_NAME); + if (svmName == null || lunName == null || igroupName == null || svmName.isEmpty() || lunName.isEmpty() || igroupName.isEmpty()) { + logger.error("getLogicalAccess: Invalid request values: {}", values); + throw new CloudRuntimeException(" Invalid request"); + } + try { + String authHeader = OntapStorageUtils.generateAuthHeader(storage.getUsername(), storage.getPassword()); + OntapResponse lunMapResponse = sanFeignClient.getLunMapResponse(authHeader, + Map.of( + OntapStorageConstants.SVM_DOT_NAME, svmName, + OntapStorageConstants.LUN_DOT_NAME, lunName, + OntapStorageConstants.IGROUP_DOT_NAME, igroupName, + OntapStorageConstants.FIELDS, OntapStorageConstants.LOGICAL_UNIT_NUMBER + )); + if (lunMapResponse != null && lunMapResponse.getRecords() != null && !lunMapResponse.getRecords().isEmpty()) { + String lunNumber = lunMapResponse.getRecords().get(0).getLogicalUnitNumber() != null ? + lunMapResponse.getRecords().get(0).getLogicalUnitNumber().toString() : null; + return lunNumber != null ? 
Map.of(OntapStorageConstants.LOGICAL_UNIT_NUMBER, lunNumber) : null; + } + } catch (Exception e) { + logger.warn("getLogicalAccess: LunMap not found for Lun: {} and igroup: {} ({}).", lunName, igroupName, e.getMessage()); + } return null; } + + @Override + public String ensureLunMapped(String svmName, String lunName, String accessGroupName) { + logger.info("ensureLunMapped: Ensuring LUN [{}] is mapped to igroup [{}] on SVM [{}]", lunName, accessGroupName, svmName); + + // Check existing map first + Map getMap = Map.of( + OntapStorageConstants.LUN_DOT_NAME, lunName, + OntapStorageConstants.SVM_DOT_NAME, svmName, + OntapStorageConstants.IGROUP_DOT_NAME, accessGroupName + ); + Map mapResp = getLogicalAccess(getMap); + if (mapResp != null && mapResp.containsKey(OntapStorageConstants.LOGICAL_UNIT_NUMBER)) { + String lunNumber = mapResp.get(OntapStorageConstants.LOGICAL_UNIT_NUMBER); + logger.info("ensureLunMapped: Existing LunMap found for LUN [{}] in igroup [{}] with LUN number [{}]", lunName, accessGroupName, lunNumber); + return lunNumber; + } + + // Create if not exists + Map enableMap = Map.of( + OntapStorageConstants.LUN_DOT_NAME, lunName, + OntapStorageConstants.SVM_DOT_NAME, svmName, + OntapStorageConstants.IGROUP_DOT_NAME, accessGroupName + ); + Map response = enableLogicalAccess(enableMap); + if (response == null || !response.containsKey(OntapStorageConstants.LOGICAL_UNIT_NUMBER)) { + throw new CloudRuntimeException("Failed to map LUN [" + lunName + "] to iGroup [" + accessGroupName + "]"); + } + logger.info("ensureLunMapped: Successfully mapped LUN [{}] to igroup [{}] with LUN number [{}]", lunName, accessGroupName, response.get(OntapStorageConstants.LOGICAL_UNIT_NUMBER)); + return response.get(OntapStorageConstants.LOGICAL_UNIT_NUMBER); + } + /** + * Reverts a LUN to a snapshot using the ONTAP CLI-based snapshot file restore API. + * + *

ONTAP REST API (CLI passthrough): + * {@code POST /api/private/cli/volume/snapshot/restore-file}

+ * + *

This method uses the native CLI API which is more reliable and works + * consistently for both NFS files and iSCSI LUNs.

+ * + * @param snapshotName The ONTAP FlexVolume snapshot name + * @param flexVolUuid The FlexVolume UUID (not used in CLI API, kept for interface consistency) + * @param snapshotUuid The ONTAP snapshot UUID (not used in CLI API, kept for interface consistency) + * @param volumePath The LUN name (used to construct the path) + * @param lunUuid The LUN UUID (not used in CLI API, kept for interface consistency) + * @param flexVolName The FlexVolume name (required for CLI API) + * @return JobResponse for the async restore operation + */ + @Override + public JobResponse revertSnapshotForCloudStackVolume(String snapshotName, String flexVolUuid, + String snapshotUuid, String volumePath, + String lunUuid, String flexVolName) { + logger.info("revertSnapshotForCloudStackVolume [iSCSI]: Restoring LUN [{}] from snapshot [{}] on FlexVol [{}]", + volumePath, snapshotName, flexVolName); + + if (snapshotName == null || snapshotName.isEmpty()) { + throw new CloudRuntimeException("Snapshot name is required for iSCSI snapshot revert"); + } + if (flexVolName == null || flexVolName.isEmpty()) { + throw new CloudRuntimeException("FlexVolume name is required for iSCSI snapshot revert"); + } + if (volumePath == null || volumePath.isEmpty()) { + throw new CloudRuntimeException("LUN path is required for iSCSI snapshot revert"); + } + + String authHeader = getAuthHeader(); + String svmName = storage.getSvmName(); + + // Prepare the LUN path for ONTAP CLI API (ensure it starts with "/") + String ontapLunPath = volumePath.startsWith("/") ? 
volumePath : "/" + volumePath; + + // Create CLI snapshot restore request + CliSnapshotRestoreRequest restoreRequest = new CliSnapshotRestoreRequest( + svmName, flexVolName, snapshotName, ontapLunPath); + + logger.info("revertSnapshotForCloudStackVolume: Calling CLI file restore API with vserver={}, volume={}, snapshot={}, path={}", + svmName, flexVolName, snapshotName, ontapLunPath); + + return getSnapshotFeignClient().restoreFileFromSnapshotCli(authHeader, restoreRequest); + } } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/AccessGroup.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/AccessGroup.java index 9ff80e7cf8a9..975a74df85aa 100755 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/AccessGroup.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/AccessGroup.java @@ -20,7 +20,6 @@ package org.apache.cloudstack.storage.service.model; import com.cloud.host.HostVO; -import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; import org.apache.cloudstack.engine.subsystem.api.storage.Scope; import org.apache.cloudstack.storage.feign.model.ExportPolicy; import org.apache.cloudstack.storage.feign.model.Igroup; @@ -33,7 +32,7 @@ public class AccessGroup { private ExportPolicy exportPolicy; private List hostsToConnect; - private PrimaryDataStoreInfo primaryDataStoreInfo; + private Long storagePoolId; private Scope scope; @@ -58,12 +57,15 @@ public List getHostsToConnect() { public void setHostsToConnect(List hostsToConnect) { this.hostsToConnect = hostsToConnect; } - public PrimaryDataStoreInfo getPrimaryDataStoreInfo() { - return primaryDataStoreInfo; + + public Long getStoragePoolId() { + return storagePoolId; } - public void setPrimaryDataStoreInfo(PrimaryDataStoreInfo primaryDataStoreInfo) { - this.primaryDataStoreInfo = primaryDataStoreInfo; + + public void 
setStoragePoolId(Long storagePoolId) { + this.storagePoolId = storagePoolId; } + public Scope getScope() { return scope; } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/CloudStackVolume.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/CloudStackVolume.java index 6c51e4630800..3edf02000cf2 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/CloudStackVolume.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/CloudStackVolume.java @@ -25,9 +25,28 @@ public class CloudStackVolume { + /** + * Field used for request: + * a. snapshot workflows will get source file details from it. + */ private FileInfo file; + + /** + * Field used for request: + * a. snapshot workflows will get source LUN details from it. + */ private Lun lun; private String datastoreId; + /** + * FlexVolume UUID on which this cloudstack volume is created. + * a. Field is eligible for unified storage only. + * b. It will be null for the disaggregated storage. 
+ */ + private String flexVolumeUuid; + /** + * Field serves for snapshot workflows + */ + private String destinationPath; private DataObject volumeInfo; // This is needed as we need DataObject to be passed to agent to create volume public FileInfo getFile() { return file; @@ -56,4 +75,14 @@ public DataObject getVolumeInfo() { public void setVolumeInfo(DataObject volumeInfo) { this.volumeInfo = volumeInfo; } + public String getFlexVolumeUuid() { + return flexVolumeUuid; + } + public void setFlexVolumeUuid(String flexVolumeUuid) { + this.flexVolumeUuid = flexVolumeUuid; + } + + public String getDestinationPath() { return this.destinationPath; } + public void setDestinationPath(String destinationPath) { this.destinationPath = destinationPath; } + } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/OntapStorageConstants.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/OntapStorageConstants.java index 0cf0a9b07e0f..2d6e4a4530ea 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/OntapStorageConstants.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/OntapStorageConstants.java @@ -22,7 +22,7 @@ public class OntapStorageConstants { - public static final String ONTAP_PLUGIN_NAME = "ONTAP"; + public static final String ONTAP_PLUGIN_NAME = "NetApp ONTAP"; public static final int NFS3_PORT = 2049; public static final int ISCSI_PORT = 3260; @@ -34,7 +34,7 @@ public class OntapStorageConstants { public static final String USERNAME = "username"; public static final String PASSWORD = "password"; public static final String DATA_LIF = "dataLIF"; - public static final String MANAGEMENT_LIF = "managementLIF"; + public static final String STORAGE_IP = "storageIP"; public static final String VOLUME_NAME = "volumeName"; public static final String VOLUME_UUID = "volumeUUID"; public static final String EXPORT_POLICY_ID = 
"exportPolicyId"; @@ -42,6 +42,8 @@ public class OntapStorageConstants { public static final String IS_DISAGGREGATED = "isDisaggregated"; public static final String RUNNING = "running"; public static final String EXPORT = "export"; + public static final String NFS_MOUNT_OPTIONS = "nfsmountopts"; + public static final String NFS3_MOUNT_OPTIONS_VER_3 = "vers=3"; public static final int ONTAP_PORT = 443; @@ -90,4 +92,16 @@ public class OntapStorageConstants { public static final String IGROUP_DOT_UUID = "igroup.uuid"; public static final String UNDERSCORE = "_"; public static final String CS = "cs"; + public static final String SRC_CS_VOLUME_ID = "src_cs_volume_id"; + public static final String BASE_ONTAP_FV_ID = "base_ontap_fv_id"; + public static final String ONTAP_SNAP_ID = "ontap_snap_id"; + public static final String ONTAP_SNAP_NAME = "ontap_snap_name"; + public static final String VOLUME_PATH = "volume_path"; + public static final String PRIMARY_POOL_ID = "primary_pool_id"; + public static final String ONTAP_SNAP_SIZE = "ontap_snap_size"; + public static final String FILE_PATH = "file_path"; + public static final int MAX_SNAPSHOT_NAME_LENGTH = 64; + + /** vm_snapshot_details key for ONTAP FlexVolume-level VM snapshots. 
*/ + public static final String ONTAP_FLEXVOL_SNAPSHOT = "ontapFlexVolSnapshot"; } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/OntapStorageUtils.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/OntapStorageUtils.java index 0924cf3b9bb6..ae2663aa4620 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/OntapStorageUtils.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/OntapStorageUtils.java @@ -19,12 +19,18 @@ package org.apache.cloudstack.storage.utils; -import com.cloud.storage.ScopeType; +import com.cloud.exception.InvalidParameterValueException; import com.cloud.utils.StringUtils; import com.cloud.utils.exception.CloudRuntimeException; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.feign.model.Lun; +import org.apache.cloudstack.storage.feign.model.LunSpace; import org.apache.cloudstack.storage.feign.model.OntapStorage; +import org.apache.cloudstack.storage.feign.model.Svm; import org.apache.cloudstack.storage.provider.StorageProviderFactory; import org.apache.cloudstack.storage.service.StorageStrategy; +import org.apache.cloudstack.storage.service.model.CloudStackVolume; import org.apache.cloudstack.storage.service.model.ProtocolType; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -36,25 +42,92 @@ public class OntapStorageUtils { private static final Logger logger = LogManager.getLogger(OntapStorageUtils.class); - private static final String BASIC = "Basic"; private static final String AUTH_HEADER_COLON = ":"; + /** + * Method generates authentication headers using storage backend credentials passed as normal string + * + * @param username -->> username of the storage backend + * @param password -->> normal decoded password of the 
storage backend + * @return + */ public static String generateAuthHeader (String username, String password) { byte[] encodedBytes = Base64Utils.encode((username + AUTH_HEADER_COLON + password).getBytes(StandardCharsets.UTF_8)); return BASIC + StringUtils.SPACE + new String(encodedBytes); } + public static CloudStackVolume createCloudStackVolumeRequestByProtocol(StoragePoolVO storagePool, Map details, DataObject volumeObject) { + CloudStackVolume cloudStackVolumeRequest = null; + + String protocol = details.get(OntapStorageConstants.PROTOCOL); + ProtocolType protocolType = ProtocolType.valueOf(protocol); + switch (protocolType) { + case NFS3: + cloudStackVolumeRequest = new CloudStackVolume(); + cloudStackVolumeRequest.setDatastoreId(String.valueOf(storagePool.getId())); + cloudStackVolumeRequest.setVolumeInfo(volumeObject); + break; + case ISCSI: + Svm svm = new Svm(); + svm.setName(details.get(OntapStorageConstants.SVM_NAME)); + cloudStackVolumeRequest = new CloudStackVolume(); + Lun lunRequest = new Lun(); + lunRequest.setSvm(svm); + + LunSpace lunSpace = new LunSpace(); + lunSpace.setSize(volumeObject.getSize()); + lunRequest.setSpace(lunSpace); + //Lun name is full path like in unified "/vol/VolumeName/LunName" + String lunName = volumeObject.getName().replace(OntapStorageConstants.HYPHEN, OntapStorageConstants.UNDERSCORE); + if(!isValidName(lunName)) { + String errMsg = "createAsync: Invalid dataObject name [" + lunName + "]. 
It must start with a letter and can only contain letters, digits, and underscores, and be up to 200 characters long."; + throw new InvalidParameterValueException(errMsg); + } + String lunFullName = getLunName(storagePool.getName(), lunName); + lunRequest.setName(lunFullName); + + String osType = getOSTypeFromHypervisor(storagePool.getHypervisor().name()); + lunRequest.setOsType(Lun.OsTypeEnum.valueOf(osType)); + + cloudStackVolumeRequest.setLun(lunRequest); + break; + default: + throw new CloudRuntimeException("Unsupported protocol " + protocol); + + } + return cloudStackVolumeRequest; + } + + public static boolean isValidName(String name) { + // Check for null and length constraint first + if (name == null || name.length() > 200) { + return false; + } + // Regex: Starts with a letter, followed by letters, digits, or underscores + return name.matches(OntapStorageConstants.ONTAP_NAME_REGEX); + } + + public static String getOSTypeFromHypervisor(String hypervisorType){ + switch (hypervisorType) { + case OntapStorageConstants.KVM: + return Lun.OsTypeEnum.LINUX.name(); + default: + String errMsg = "getOSTypeFromHypervisor : Unsupported hypervisor type " + hypervisorType + " for ONTAP storage"; + logger.error(errMsg); + throw new CloudRuntimeException(errMsg); + } + } + public static StorageStrategy getStrategyByStoragePoolDetails(Map details) { if (details == null || details.isEmpty()) { logger.error("getStrategyByStoragePoolDetails: Storage pool details are null or empty"); - throw new CloudRuntimeException("getStrategyByStoragePoolDetails: Storage pool details are null or empty"); + throw new CloudRuntimeException("Storage pool details are null or empty"); } String protocol = details.get(OntapStorageConstants.PROTOCOL); OntapStorage ontapStorage = new OntapStorage(details.get(OntapStorageConstants.USERNAME), details.get(OntapStorageConstants.PASSWORD), - details.get(OntapStorageConstants.MANAGEMENT_LIF), details.get(OntapStorageConstants.SVM_NAME), 
Long.parseLong(details.get(OntapStorageConstants.SIZE)), - ProtocolType.valueOf(protocol), - Boolean.parseBoolean(details.get(OntapStorageConstants.IS_DISAGGREGATED))); + details.get(OntapStorageConstants.STORAGE_IP), details.get(OntapStorageConstants.SVM_NAME), Long.parseLong(details.get(OntapStorageConstants.SIZE)), + ProtocolType.valueOf(protocol)); StorageStrategy storageStrategy = StorageProviderFactory.getStrategy(ontapStorage); boolean isValid = storageStrategy.connect(); if (isValid) { @@ -62,15 +135,23 @@ public static StorageStrategy getStrategyByStoragePoolDetails(MapThis strategy handles VM-level (instance) snapshots for VMs whose volumes + * reside on ONTAP managed primary storage. Instead of creating per-file clones + * (the old approach), it takes ONTAP FlexVolume-level snapshots via the + * ONTAP REST API ({@code POST /api/storage/volumes/{uuid}/snapshots}).

+ * + *

Key Advantage:

+ *

When multiple CloudStack disks (ROOT + DATA) reside on the same ONTAP + * FlexVolume, a single FlexVolume snapshot atomically captures all of them. + * This is both faster and more storage-efficient than per-file clones.

+ * + *

Flow:

+ *
    + *
  1. Group all VM volumes by their parent FlexVolume UUID
  2. + *
  3. Freeze the VM via QEMU guest agent ({@code fsfreeze}) — if quiesce requested
  4. + *
  5. For each unique FlexVolume, create one ONTAP snapshot
  6. + *
  7. Thaw the VM
  8. + *
  9. Record FlexVolume → snapshot UUID mappings in {@code vm_snapshot_details}
  10. + *
+ * + *

Metadata in vm_snapshot_details:

+ *

Each FlexVolume snapshot is stored as a detail row with: + *

    + *
  • name = {@value OntapStorageConstants#ONTAP_FLEXVOL_SNAPSHOT}
  • + *
  • value = {@code "::::::::::"}
  • + *
+ * One row is persisted per CloudStack volume (not per FlexVolume) so that the + * revert operation can restore individual files/LUNs using the ONTAP Snapshot + * File Restore API ({@code POST /api/storage/volumes/{vol}/snapshots/{snap}/files/{path}/restore}).

+ * + *

Strategy Selection:

+ *

Returns {@code StrategyPriority.HIGHEST} when:

+ *
    + *
  • Hypervisor is KVM
  • + *
  • Snapshot type is Disk-only (no memory)
  • + *
  • All VM volumes are on ONTAP managed primary storage
  • + *
+ */ +public class OntapVMSnapshotStrategy extends StorageVMSnapshotStrategy { + + private static final Logger logger = LogManager.getLogger(OntapVMSnapshotStrategy.class); + + /** Separator used in the vm_snapshot_details value to delimit FlexVol UUID, snapshot UUID, snapshot name, and pool ID. */ + static final String DETAIL_SEPARATOR = "::"; + + @Inject + private StoragePoolDetailsDao storagePoolDetailsDao; + + @Inject + private VolumeDetailsDao volumeDetailsDao; + + @Override + public boolean configure(String name, Map params) throws ConfigurationException { + return super.configure(name, params); + } + + // ────────────────────────────────────────────────────────────────────────── + // Strategy Selection + // ────────────────────────────────────────────────────────────────────────── + + @Override + public StrategyPriority canHandle(VMSnapshot vmSnapshot) { + VMSnapshotVO vmSnapshotVO = (VMSnapshotVO) vmSnapshot; + + // For existing (non-Allocated) snapshots, check if we created them + if (!VMSnapshot.State.Allocated.equals(vmSnapshotVO.getState())) { + // Check for our FlexVolume snapshot details first + List flexVolDetails = vmSnapshotDetailsDao.findDetails(vmSnapshot.getId(), OntapStorageConstants.ONTAP_FLEXVOL_SNAPSHOT); + if (CollectionUtils.isNotEmpty(flexVolDetails)) { + // Verify the volumes are still on ONTAP storage + if (allVolumesOnOntapManagedStorage(vmSnapshot.getVmId())) { + return StrategyPriority.HIGHEST; + } + return StrategyPriority.CANT_HANDLE; + } + // Also check legacy STORAGE_SNAPSHOT details for backward compatibility + List legacyDetails = vmSnapshotDetailsDao.findDetails(vmSnapshot.getId(), STORAGE_SNAPSHOT); + if (CollectionUtils.isNotEmpty(legacyDetails) && allVolumesOnOntapManagedStorage(vmSnapshot.getVmId())) { + return StrategyPriority.HIGHEST; + } + return StrategyPriority.CANT_HANDLE; + } + + // For new snapshots (Allocated state), check if we can handle this VM + // ONTAP only supports disk-only snapshots, not memory snapshots 
+ if (allVolumesOnOntapManagedStorage(vmSnapshot.getVmId())) { + if (vmSnapshotVO.getType() == VMSnapshot.Type.DiskAndMemory) { + logger.debug("canHandle: Memory snapshots (DiskAndMemory) are not supported for VMs on ONTAP storage. VMSnapshot [{}]", vmSnapshot.getId()); + return StrategyPriority.CANT_HANDLE; + } + return StrategyPriority.HIGHEST; + } + + return StrategyPriority.CANT_HANDLE; + } + + @Override + public StrategyPriority canHandle(Long vmId, Long rootPoolId, boolean snapshotMemory) { + // ONTAP FlexVolume snapshots only support disk-only (crash-consistent) snapshots. + // Memory snapshots (snapshotMemory=true) are not supported because: + // 1. ONTAP snapshots capture disk state only, not VM memory + // 2. Allowing memory snapshots would require falling back to libvirt snapshots, + // creating mixed snapshot chains that would cause issues during revert + // Return CANT_HANDLE so VMSnapshotManagerImpl can provide a clear error message. + if (snapshotMemory) { + logger.debug("canHandle: Memory snapshots (snapshotMemory=true) are not supported for VMs on ONTAP storage. VM [{}]", vmId); + return StrategyPriority.CANT_HANDLE; + } + + if (allVolumesOnOntapManagedStorage(vmId)) { + return StrategyPriority.HIGHEST; + } + + return StrategyPriority.CANT_HANDLE; + } + + /** + * Checks whether all volumes of a VM reside on ONTAP managed primary storage. + */ + boolean allVolumesOnOntapManagedStorage(long vmId) { + UserVm userVm = userVmDao.findById(vmId); + if (userVm == null) { + logger.debug("allVolumesOnOntapManagedStorage: VM with id [{}] not found", vmId); + return false; + } + + if (!Hypervisor.HypervisorType.KVM.equals(userVm.getHypervisorType())) { + logger.debug("allVolumesOnOntapManagedStorage: ONTAP VM snapshot strategy only supports KVM hypervisor, VM [{}] uses [{}]", + vmId, userVm.getHypervisorType()); + return false; + } + + // ONTAP VM snapshots work for both Running and Stopped VMs. + // Running VMs may be frozen/thawed (if quiesce is requested). 
+ // Stopped VMs don't need freeze/thaw - just take the FlexVol snapshot directly. + VirtualMachine.State vmState = userVm.getState(); + if (!VirtualMachine.State.Running.equals(vmState) && !VirtualMachine.State.Stopped.equals(vmState)) { + logger.info("allVolumesOnOntapManagedStorage: ONTAP VM snapshot strategy requires VM to be Running or Stopped, VM [{}] is in state [{}], returning false", + vmId, vmState); + return false; + } + + List volumes = volumeDao.findByInstance(vmId); + if (volumes == null || volumes.isEmpty()) { + logger.debug("allVolumesOnOntapManagedStorage: No volumes found for VM [{}]", vmId); + return false; + } + + for (VolumeVO volume : volumes) { + if (volume.getPoolId() == null) { + return false; + } + StoragePoolVO pool = storagePool.findById(volume.getPoolId()); + if (pool == null) { + return false; + } + if (!pool.isManaged()) { + logger.debug("allVolumesOnOntapManagedStorage: Volume [{}] is on non-managed storage pool [{}], not ONTAP", + volume.getId(), pool.getName()); + return false; + } + if (!OntapStorageConstants.ONTAP_PLUGIN_NAME.equals(pool.getStorageProviderName())) { + logger.debug("allVolumesOnOntapManagedStorage: Volume [{}] is on managed pool [{}] with provider [{}], not ONTAP", + volume.getId(), pool.getName(), pool.getStorageProviderName()); + return false; + } + } + + logger.debug("allVolumesOnOntapManagedStorage: All volumes of VM [{}] are on ONTAP managed storage, this strategy can handle", vmId); + return true; + } + + // ────────────────────────────────────────────────────────────────────────── + // Take VM Snapshot (FlexVolume-level) + // ────────────────────────────────────────────────────────────────────────── + + /** + * Takes a VM-level snapshot by freezing the VM, creating ONTAP FlexVolume-level + * snapshots (one per unique FlexVolume), and then thawing the VM. + * + *

Volumes are grouped by their parent FlexVolume UUID (from storage pool details). + * For each unique FlexVolume, exactly one ONTAP snapshot is created via + * {@code POST /api/storage/volumes/{uuid}/snapshots}. This means if a VM has + * ROOT and DATA disks on the same FlexVolume, only one snapshot is created.

+ * + *

Memory Snapshots Not Supported: This strategy only supports disk-only + * (crash-consistent) snapshots. Memory snapshots (snapshotmemory=true) are rejected + * with a clear error message. This is because ONTAP FlexVolume snapshots capture disk + * state only, and allowing mixed snapshot chains (ONTAP disk + libvirt memory) would + * cause issues during revert operations.

+ * + * @throws CloudRuntimeException if memory snapshot is requested + */ + @Override + public VMSnapshot takeVMSnapshot(VMSnapshot vmSnapshot) { + Long hostId = vmSnapshotHelper.pickRunningHost(vmSnapshot.getVmId()); + UserVm userVm = userVmDao.findById(vmSnapshot.getVmId()); + VMSnapshotVO vmSnapshotVO = (VMSnapshotVO) vmSnapshot; + + // Transition to Creating state FIRST - this is required so that the finally block + // can properly transition to Error state via OperationFailed event if anything fails. + // (OperationFailed can only transition FROM Creating state, not from Allocated) + try { + vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshotVO, VMSnapshot.Event.CreateRequested); + } catch (NoTransitionException e) { + throw new CloudRuntimeException(e.getMessage()); + } + + FreezeThawVMAnswer freezeAnswer = null; + FreezeThawVMCommand thawCmd = null; + FreezeThawVMAnswer thawAnswer = null; + long startFreeze = 0; + + // Track which FlexVolume snapshots were created (for rollback) + List createdSnapshots = new ArrayList<>(); + + boolean result = false; + try { + GuestOSVO guestOS = guestOSDao.findById(userVm.getGuestOSId()); + List volumeTOs = vmSnapshotHelper.getVolumeTOList(userVm.getId()); + + long prev_chain_size = 0; + long virtual_size = 0; + + // Build snapshot parent chain + VMSnapshotTO current = null; + VMSnapshotVO currentSnapshot = vmSnapshotDao.findCurrentSnapshotByVmId(userVm.getId()); + if (currentSnapshot != null) { + current = vmSnapshotHelper.getSnapshotWithParents(currentSnapshot); + } + + // Respect the user's quiesce option from the VM snapshot request + boolean quiescevm = true; // default to true for safety + VMSnapshotOptions options = vmSnapshotVO.getOptions(); + if (options != null) { + quiescevm = options.needQuiesceVM(); + } + + // Check if VM is actually running - freeze/thaw only makes sense for running VMs + boolean vmIsRunning = VirtualMachine.State.Running.equals(userVm.getState()); + boolean shouldFreezeThaw = quiescevm && 
vmIsRunning; + + if (!vmIsRunning) { + logger.info("takeVMSnapshot: VM [{}] is in state [{}] (not Running). Skipping freeze/thaw - " + + "FlexVolume snapshot will be taken directly.", userVm.getInstanceName(), userVm.getState()); + } else if (quiescevm) { + logger.info("takeVMSnapshot: Quiesce option is enabled for ONTAP VM Snapshot of VM [{}]. " + + "VM file systems will be frozen/thawed for application-consistent snapshots.", userVm.getInstanceName()); + } else { + logger.info("takeVMSnapshot: Quiesce option is disabled for ONTAP VM Snapshot of VM [{}]. " + + "Snapshots will be crash-consistent only.", userVm.getInstanceName()); + } + + VMSnapshotTO target = new VMSnapshotTO(vmSnapshot.getId(), vmSnapshot.getName(), + vmSnapshot.getType(), null, vmSnapshot.getDescription(), false, current, quiescevm); + + if (current == null) { + vmSnapshotVO.setParent(null); + } else { + vmSnapshotVO.setParent(current.getId()); + } + + CreateVMSnapshotCommand ccmd = new CreateVMSnapshotCommand( + userVm.getInstanceName(), userVm.getUuid(), target, volumeTOs, guestOS.getDisplayName()); + + logger.info("takeVMSnapshot: Creating ONTAP FlexVolume VM Snapshot for VM [{}] with quiesce={}", userVm.getInstanceName(), quiescevm); + + // Prepare volume info list and calculate sizes + for (VolumeObjectTO volumeObjectTO : volumeTOs) { + virtual_size += volumeObjectTO.getSize(); + VolumeVO volumeVO = volumeDao.findById(volumeObjectTO.getId()); + prev_chain_size += volumeVO.getVmSnapshotChainSize() == null ? 
0 : volumeVO.getVmSnapshotChainSize(); + } + + // ── Group volumes by FlexVolume UUID ── + Map flexVolGroups = groupVolumesByFlexVol(volumeTOs); + + logger.info("takeVMSnapshot: VM [{}] has {} volumes across {} unique FlexVolume(s)", + userVm.getInstanceName(), volumeTOs.size(), flexVolGroups.size()); + + // ── Step 1: Freeze the VM (only if quiescing is requested AND VM is running) ── + if (shouldFreezeThaw) { + FreezeThawVMCommand freezeCommand = new FreezeThawVMCommand(userVm.getInstanceName()); + freezeCommand.setOption(FreezeThawVMCommand.FREEZE); + freezeAnswer = (FreezeThawVMAnswer) agentMgr.send(hostId, freezeCommand); + startFreeze = System.nanoTime(); + + thawCmd = new FreezeThawVMCommand(userVm.getInstanceName()); + thawCmd.setOption(FreezeThawVMCommand.THAW); + + if (freezeAnswer == null || !freezeAnswer.getResult()) { + String detail = (freezeAnswer != null) ? freezeAnswer.getDetails() : "no response from agent"; + throw new CloudRuntimeException("Could not freeze VM [" + userVm.getInstanceName() + + "] for ONTAP snapshot. Ensure qemu-guest-agent is installed and running. 
Details: " + detail); + } + + logger.info("takeVMSnapshot: VM [{}] frozen successfully via QEMU guest agent", userVm.getInstanceName()); + } else { + logger.info("takeVMSnapshot: Skipping VM freeze for VM [{}] (quiesce={}, vmIsRunning={})", + userVm.getInstanceName(), quiescevm, vmIsRunning); + } + + // ── Step 2: Create FlexVolume-level snapshots ── + try { + String snapshotNameBase = buildSnapshotName(vmSnapshot); + + for (Map.Entry entry : flexVolGroups.entrySet()) { + String flexVolUuid = entry.getKey(); + FlexVolGroupInfo groupInfo = entry.getValue(); + long startSnapshot = System.nanoTime(); + + // Build storage strategy from pool details to get the feign client + StorageStrategy storageStrategy = OntapStorageUtils.getStrategyByStoragePoolDetails(groupInfo.poolDetails); + SnapshotFeignClient snapshotClient = storageStrategy.getSnapshotFeignClient(); + String authHeader = storageStrategy.getAuthHeader(); + + // Use the same snapshot name for all FlexVolumes in this VM snapshot + // (each FlexVolume gets its own independent snapshot with this name) + FlexVolSnapshot snapshotRequest = new FlexVolSnapshot(snapshotNameBase, + "CloudStack VM snapshot " + vmSnapshot.getName() + " for VM " + userVm.getInstanceName()); + + logger.info("takeVMSnapshot: Creating ONTAP FlexVolume snapshot [{}] on FlexVol UUID [{}] covering {} volume(s)", + snapshotNameBase, flexVolUuid, groupInfo.volumeIds.size()); + + JobResponse jobResponse = snapshotClient.createSnapshot(authHeader, flexVolUuid, snapshotRequest); + if (jobResponse == null || jobResponse.getJob() == null) { + throw new CloudRuntimeException("Failed to initiate FlexVolume snapshot on FlexVol UUID [" + flexVolUuid + "]"); + } + + // Poll for job completion + Boolean jobSucceeded = storageStrategy.jobPollForSuccess(jobResponse.getJob().getUuid(), 30, 2); + if (!jobSucceeded) { + throw new CloudRuntimeException("FlexVolume snapshot job failed on FlexVol UUID [" + flexVolUuid + "]"); + } + + // Retrieve the created snapshot 
UUID by name + String snapshotUuid = resolveSnapshotUuid(snapshotClient, authHeader, flexVolUuid, snapshotNameBase); + + String protocol = groupInfo.poolDetails.get(OntapStorageConstants.PROTOCOL); + + // Create one detail per CloudStack volume in this FlexVol group (for single-file restore during revert) + for (Long volumeId : groupInfo.volumeIds) { + String volumePath = resolveVolumePathOnOntap(volumeId, protocol, groupInfo.poolDetails); + FlexVolSnapshotDetail detail = new FlexVolSnapshotDetail( + flexVolUuid, snapshotUuid, snapshotNameBase, volumePath, groupInfo.poolId, protocol); + createdSnapshots.add(detail); + } + + logger.info("takeVMSnapshot: ONTAP FlexVolume snapshot [{}] (uuid={}) on FlexVol [{}] completed in {} ms. Covers volumes: {}", + snapshotNameBase, snapshotUuid, flexVolUuid, + TimeUnit.MILLISECONDS.convert(System.nanoTime() - startSnapshot, TimeUnit.NANOSECONDS), + groupInfo.volumeIds); + } + } finally { + // ── Step 3: Thaw the VM (only if it was frozen, always even on error) ── + if (quiescevm && freezeAnswer != null && freezeAnswer.getResult()) { + try { + thawAnswer = (FreezeThawVMAnswer) agentMgr.send(hostId, thawCmd); + if (thawAnswer != null && thawAnswer.getResult()) { + logger.info("takeVMSnapshot: VM [{}] thawed successfully. Total freeze duration: {} ms", + userVm.getInstanceName(), + TimeUnit.MILLISECONDS.convert(System.nanoTime() - startFreeze, TimeUnit.NANOSECONDS)); + } else { + logger.warn("takeVMSnapshot: Failed to thaw VM [{}]: {}", userVm.getInstanceName(), + (thawAnswer != null) ? 
thawAnswer.getDetails() : "no response"); + } + } catch (Exception thawEx) { + logger.error("takeVMSnapshot: Exception while thawing VM [{}]: {}", userVm.getInstanceName(), thawEx.getMessage(), thawEx); + } + } + } + + // ── Step 4: Persist FlexVolume snapshot details (one row per CloudStack volume) ── + for (FlexVolSnapshotDetail detail : createdSnapshots) { + vmSnapshotDetailsDao.persist(new VMSnapshotDetailsVO( + vmSnapshot.getId(), OntapStorageConstants.ONTAP_FLEXVOL_SNAPSHOT, detail.toString(), true)); + } + + // ── Step 5: Finalize via parent processAnswer ── + CreateVMSnapshotAnswer answer = new CreateVMSnapshotAnswer(ccmd, true, ""); + answer.setVolumeTOs(volumeTOs); + + processAnswer(vmSnapshotVO, userVm, answer, null); + logger.info("takeVMSnapshot: ONTAP FlexVolume VM Snapshot [{}] created successfully for VM [{}] ({} FlexVol snapshot(s))", + vmSnapshot.getName(), userVm.getInstanceName(), createdSnapshots.size()); + + long new_chain_size = 0; + for (VolumeObjectTO volumeTo : answer.getVolumeTOs()) { + publishUsageEvent(EventTypes.EVENT_VM_SNAPSHOT_CREATE, vmSnapshot, userVm, volumeTo); + new_chain_size += volumeTo.getSize(); + } + publishUsageEvent(EventTypes.EVENT_VM_SNAPSHOT_ON_PRIMARY, vmSnapshot, userVm, + new_chain_size - prev_chain_size, virtual_size); + + result = true; + return vmSnapshot; + + } catch (OperationTimedoutException e) { + logger.error("takeVMSnapshot: ONTAP VM Snapshot [{}] timed out: {}", vmSnapshot.getName(), e.getMessage()); + throw new CloudRuntimeException("Creating Instance Snapshot: " + vmSnapshot.getName() + " timed out: " + e.getMessage()); + } catch (AgentUnavailableException e) { + logger.error("takeVMSnapshot: ONTAP VM Snapshot [{}] failed, agent unavailable: {}", vmSnapshot.getName(), e.getMessage()); + throw new CloudRuntimeException("Creating Instance Snapshot: " + vmSnapshot.getName() + " failed: " + e.getMessage()); + } catch (CloudRuntimeException e) { + throw e; + } finally { + if (!result) { + // Rollback all 
FlexVolume snapshots created so far (deduplicate by FlexVol+Snapshot) + Map rolledBack = new HashMap<>(); + for (FlexVolSnapshotDetail detail : createdSnapshots) { + String dedupeKey = detail.flexVolUuid + "::" + detail.snapshotUuid; + if (!rolledBack.containsKey(dedupeKey)) { + try { + rollbackFlexVolSnapshot(detail); + rolledBack.put(dedupeKey, Boolean.TRUE); + } catch (Exception rollbackEx) { + logger.error("takeVMSnapshot: Failed to rollback FlexVol snapshot [{}] on FlexVol [{}]: {}", + detail.snapshotUuid, detail.flexVolUuid, rollbackEx.getMessage()); + } + } + } + + // Ensure VM is thawed if we haven't done so + if (thawAnswer == null && freezeAnswer != null && freezeAnswer.getResult()) { + try { + logger.info("takeVMSnapshot: Thawing VM [{}] during error cleanup", userVm.getInstanceName()); + thawAnswer = (FreezeThawVMAnswer) agentMgr.send(hostId, thawCmd); + } catch (Exception ex) { + logger.error("takeVMSnapshot: Could not thaw VM during cleanup: {}", ex.getMessage()); + } + } + + // Clean up VM snapshot details and transition state + try { + List vmSnapshotDetails = vmSnapshotDetailsDao.listDetails(vmSnapshot.getId()); + for (VMSnapshotDetailsVO detail : vmSnapshotDetails) { + if (OntapStorageConstants.ONTAP_FLEXVOL_SNAPSHOT.equals(detail.getName())) { + vmSnapshotDetailsDao.remove(detail.getId()); + } + } + vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.OperationFailed); + } catch (NoTransitionException e1) { + logger.error("takeVMSnapshot: Cannot set VM Snapshot state to OperationFailed: {}", e1.getMessage()); + } + } + } + } + + // ────────────────────────────────────────────────────────────────────────── + // Delete VM Snapshot + // ────────────────────────────────────────────────────────────────────────── + + @Override + public boolean deleteVMSnapshot(VMSnapshot vmSnapshot) { + VMSnapshotVO vmSnapshotVO = (VMSnapshotVO) vmSnapshot; + UserVm userVm = userVmDao.findById(vmSnapshot.getVmId()); + + try { + 
vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshotVO, VMSnapshot.Event.ExpungeRequested); + } catch (NoTransitionException e) { + throw new CloudRuntimeException(e.getMessage()); + } + + try { + List volumeTOs = vmSnapshotHelper.getVolumeTOList(userVm.getId()); + String vmInstanceName = userVm.getInstanceName(); + VMSnapshotTO parent = vmSnapshotHelper.getSnapshotWithParents(vmSnapshotVO).getParent(); + + VMSnapshotTO vmSnapshotTO = new VMSnapshotTO(vmSnapshotVO.getId(), vmSnapshotVO.getName(), vmSnapshotVO.getType(), + vmSnapshotVO.getCreated().getTime(), vmSnapshotVO.getDescription(), vmSnapshotVO.getCurrent(), parent, true); + GuestOSVO guestOS = guestOSDao.findById(userVm.getGuestOSId()); + DeleteVMSnapshotCommand deleteSnapshotCommand = new DeleteVMSnapshotCommand(vmInstanceName, vmSnapshotTO, + volumeTOs, guestOS.getDisplayName()); + + // Check for FlexVolume snapshots (new approach) + List flexVolDetails = vmSnapshotDetailsDao.findDetails(vmSnapshot.getId(), OntapStorageConstants.ONTAP_FLEXVOL_SNAPSHOT); + if (CollectionUtils.isNotEmpty(flexVolDetails)) { + deleteFlexVolSnapshots(flexVolDetails); + } + + // Also handle legacy STORAGE_SNAPSHOT details (backward compatibility) + List legacyDetails = vmSnapshotDetailsDao.findDetails(vmSnapshot.getId(), STORAGE_SNAPSHOT); + if (CollectionUtils.isNotEmpty(legacyDetails)) { + deleteDiskSnapshot(vmSnapshot); + } + + processAnswer(vmSnapshotVO, userVm, new DeleteVMSnapshotAnswer(deleteSnapshotCommand, volumeTOs), null); + long full_chain_size = 0; + for (VolumeObjectTO volumeTo : volumeTOs) { + publishUsageEvent(EventTypes.EVENT_VM_SNAPSHOT_DELETE, vmSnapshot, userVm, volumeTo); + full_chain_size += volumeTo.getSize(); + } + publishUsageEvent(EventTypes.EVENT_VM_SNAPSHOT_OFF_PRIMARY, vmSnapshot, userVm, full_chain_size, 0L); + return true; + } catch (CloudRuntimeException err) { + String errMsg = String.format("Delete of ONTAP VM Snapshot [%s] of VM [%s] failed: %s", + vmSnapshot.getName(), 
userVm.getInstanceName(), err.getMessage()); + logger.error(errMsg, err); + throw new CloudRuntimeException(errMsg, err); + } + } + + // ────────────────────────────────────────────────────────────────────────── + // Revert VM Snapshot + // ────────────────────────────────────────────────────────────────────────── + + @Override + public boolean revertVMSnapshot(VMSnapshot vmSnapshot) { + VMSnapshotVO vmSnapshotVO = (VMSnapshotVO) vmSnapshot; + UserVm userVm = userVmDao.findById(vmSnapshot.getVmId()); + + try { + vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshotVO, VMSnapshot.Event.RevertRequested); + } catch (NoTransitionException e) { + throw new CloudRuntimeException(e.getMessage()); + } + + boolean result = false; + try { + List volumeTOs = vmSnapshotHelper.getVolumeTOList(userVm.getId()); + String vmInstanceName = userVm.getInstanceName(); + VMSnapshotTO parent = vmSnapshotHelper.getSnapshotWithParents(vmSnapshotVO).getParent(); + + VMSnapshotTO vmSnapshotTO = new VMSnapshotTO(vmSnapshotVO.getId(), vmSnapshotVO.getName(), vmSnapshotVO.getType(), + vmSnapshotVO.getCreated().getTime(), vmSnapshotVO.getDescription(), vmSnapshotVO.getCurrent(), parent, true); + GuestOSVO guestOS = guestOSDao.findById(userVm.getGuestOSId()); + RevertToVMSnapshotCommand revertToSnapshotCommand = new RevertToVMSnapshotCommand(vmInstanceName, + userVm.getUuid(), vmSnapshotTO, volumeTOs, guestOS.getDisplayName()); + + // Check for FlexVolume snapshots (new approach) + List flexVolDetails = vmSnapshotDetailsDao.findDetails(vmSnapshot.getId(), OntapStorageConstants.ONTAP_FLEXVOL_SNAPSHOT); + if (CollectionUtils.isNotEmpty(flexVolDetails)) { + revertFlexVolSnapshots(flexVolDetails); + } + + // Also handle legacy STORAGE_SNAPSHOT details (backward compatibility) + List legacyDetails = vmSnapshotDetailsDao.findDetails(vmSnapshot.getId(), STORAGE_SNAPSHOT); + if (CollectionUtils.isNotEmpty(legacyDetails)) { + revertDiskSnapshot(vmSnapshot); + } + + RevertToVMSnapshotAnswer answer = new 
RevertToVMSnapshotAnswer(revertToSnapshotCommand, true, ""); + answer.setVolumeTOs(volumeTOs); + processAnswer(vmSnapshotVO, userVm, answer, null); + result = true; + } catch (CloudRuntimeException e) { + logger.error("revertVMSnapshot: Revert ONTAP VM Snapshot [{}] failed: {}", vmSnapshot.getName(), e.getMessage(), e); + throw new CloudRuntimeException("Revert ONTAP VM Snapshot ["+ vmSnapshot.getName() +"] failed."); + } finally { + if (!result) { + try { + vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.OperationFailed); + } catch (NoTransitionException e1) { + logger.error("Cannot set Instance Snapshot state due to: " + e1.getMessage()); + } + } + } + return result; + } + + // ────────────────────────────────────────────────────────────────────────── + // FlexVolume Snapshot Helpers + // ────────────────────────────────────────────────────────────────────────── + + /** + * Groups volumes by their parent FlexVolume UUID using storage pool details. + * + * @param volumeTOs list of volume transfer objects + * @return map of FlexVolume UUID → group info (pool details, pool ID, volume IDs) + */ + Map groupVolumesByFlexVol(List volumeTOs) { + Map groups = new HashMap<>(); + + for (VolumeObjectTO volumeTO : volumeTOs) { + VolumeVO volumeVO = volumeDao.findById(volumeTO.getId()); + if (volumeVO == null || volumeVO.getPoolId() == null) { + throw new CloudRuntimeException("Volume [" + volumeTO.getId() + "] not found or has no pool assigned"); + } + + Map poolDetails = storagePoolDetailsDao.listDetailsKeyPairs(volumeVO.getPoolId()); + String flexVolUuid = poolDetails.get(OntapStorageConstants.VOLUME_UUID); + if (flexVolUuid == null || flexVolUuid.isEmpty()) { + throw new CloudRuntimeException("FlexVolume UUID not found in pool details for pool [" + volumeVO.getPoolId() + "]"); + } + + FlexVolGroupInfo group = groups.get(flexVolUuid); + if (group == null) { + group = new FlexVolGroupInfo(poolDetails, volumeVO.getPoolId()); + groups.put(flexVolUuid, 
group); + } + group.volumeIds.add(volumeVO.getId()); + } + + return groups; + } + + /** + * Builds a deterministic, ONTAP-safe snapshot name for a VM snapshot. + * Format: {@code vmsnap__} + */ + String buildSnapshotName(VMSnapshot vmSnapshot) { + String name = "vmsnap_" + vmSnapshot.getId() + "_" + System.currentTimeMillis(); + // ONTAP snapshot names: max 256 chars, must start with letter, only alphanumeric and underscores + if (name.length() > OntapStorageConstants.MAX_SNAPSHOT_NAME_LENGTH) { + name = name.substring(0, OntapStorageConstants.MAX_SNAPSHOT_NAME_LENGTH); + } + return name; + } + + /** + * Resolves the UUID of a newly created FlexVolume snapshot by name. + */ + String resolveSnapshotUuid(SnapshotFeignClient client, String authHeader, + String flexVolUuid, String snapshotName) { + Map queryParams = new HashMap<>(); + queryParams.put("name", snapshotName); + OntapResponse response = client.getSnapshots(authHeader, flexVolUuid, queryParams); + if (response == null || response.getRecords() == null || response.getRecords().isEmpty()) { + throw new CloudRuntimeException("Could not find FlexVolume snapshot [" + snapshotName + + "] on FlexVol [" + flexVolUuid + "] after creation"); + } + return response.getRecords().get(0).getUuid(); + } + + /** + * Resolves the ONTAP-side path of a CloudStack volume within its FlexVolume. + * + *
    + *
  • For NFS volumes the path is the filename (e.g. {@code uuid.qcow2}) + * retrieved via {@link VolumeVO#getPath()}.
  • + *
  • For iSCSI volumes the path is the LUN name within the FlexVolume + * (e.g. {@code /vol/vol1/lun_name}) stored in volume_details.
  • + *
+ * + * @param volumeId the CloudStack volume ID + * @param protocol the storage protocol (e.g. "NFS3", "ISCSI") + * @param poolDetails storage pool detail map (used for fall-back lookups) + * @return the volume path relative to the FlexVolume root + */ + String resolveVolumePathOnOntap(Long volumeId, String protocol, Map poolDetails) { + if (ProtocolType.ISCSI.name().equalsIgnoreCase(protocol)) { + // iSCSI – the LUN's ONTAP name is stored as a volume detail + VolumeDetailVO lunDetail = volumeDetailsDao.findDetail(volumeId, OntapStorageConstants.LUN_DOT_NAME); + if (lunDetail == null || lunDetail.getValue() == null || lunDetail.getValue().isEmpty()) { + throw new CloudRuntimeException( + "LUN name (volume detail '" + OntapStorageConstants.LUN_DOT_NAME + "') not found for iSCSI volume [" + volumeId + "]"); + } + return lunDetail.getValue(); + } else { + // NFS – volumeVO.getPath() holds the file path (e.g. "uuid.qcow2") + VolumeVO vol = volumeDao.findById(volumeId); + if (vol == null || vol.getPath() == null || vol.getPath().isEmpty()) { + throw new CloudRuntimeException("Volume path not found for NFS volume [" + volumeId + "]"); + } + return vol.getPath(); + } + } + + /** + * Rolls back (deletes) a FlexVolume snapshot that was created during a failed takeVMSnapshot. 
+ */ + void rollbackFlexVolSnapshot(FlexVolSnapshotDetail detail) { + try { + Map poolDetails = storagePoolDetailsDao.listDetailsKeyPairs(detail.poolId); + StorageStrategy storageStrategy = OntapStorageUtils.getStrategyByStoragePoolDetails(poolDetails); + SnapshotFeignClient client = storageStrategy.getSnapshotFeignClient(); + String authHeader = storageStrategy.getAuthHeader(); + + logger.info("rollbackFlexVolSnapshot: Rolling back FlexVol snapshot [{}] (uuid={}) on FlexVol [{}]", + detail.snapshotName, detail.snapshotUuid, detail.flexVolUuid); + + JobResponse jobResponse = client.deleteSnapshot(authHeader, detail.flexVolUuid, detail.snapshotUuid); + if (jobResponse != null && jobResponse.getJob() != null) { + storageStrategy.jobPollForSuccess(jobResponse.getJob().getUuid(), 10, 2); + } + } catch (Exception e) { + logger.error("rollbackFlexVolSnapshot: Rollback of FlexVol snapshot failed: {}", e.getMessage(), e); + } + } + + /** + * Deletes all FlexVolume snapshots associated with a VM snapshot. + * + *

Since there is one detail row per CloudStack volume, multiple rows may reference + * the same FlexVol + snapshot combination. This method deduplicates to delete each + * underlying ONTAP snapshot only once.

+ */ + void deleteFlexVolSnapshots(List flexVolDetails) { + // Track which FlexVol+Snapshot pairs have already been deleted + Map deletedSnapshots = new HashMap<>(); + + for (VMSnapshotDetailsVO detailVO : flexVolDetails) { + FlexVolSnapshotDetail detail = FlexVolSnapshotDetail.parse(detailVO.getValue()); + String dedupeKey = detail.flexVolUuid + "::" + detail.snapshotUuid; + + // Only delete the ONTAP snapshot once per FlexVol+Snapshot pair + if (!deletedSnapshots.containsKey(dedupeKey)) { + Map poolDetails = storagePoolDetailsDao.listDetailsKeyPairs(detail.poolId); + StorageStrategy storageStrategy = OntapStorageUtils.getStrategyByStoragePoolDetails(poolDetails); + SnapshotFeignClient client = storageStrategy.getSnapshotFeignClient(); + String authHeader = storageStrategy.getAuthHeader(); + + logger.info("deleteFlexVolSnapshots: Deleting ONTAP FlexVol snapshot [{}] (uuid={}) on FlexVol [{}]", + detail.snapshotName, detail.snapshotUuid, detail.flexVolUuid); + + JobResponse jobResponse = client.deleteSnapshot(authHeader, detail.flexVolUuid, detail.snapshotUuid); + if (jobResponse != null && jobResponse.getJob() != null) { + storageStrategy.jobPollForSuccess(jobResponse.getJob().getUuid(), 30, 2); + } + + deletedSnapshots.put(dedupeKey, Boolean.TRUE); + logger.info("deleteFlexVolSnapshots: Deleted ONTAP FlexVol snapshot [{}] on FlexVol [{}]", detail.snapshotName, detail.flexVolUuid); + } + + // Always remove the DB detail row + vmSnapshotDetailsDao.remove(detailVO.getId()); + } + } + + /** + * Reverts all volumes of a VM snapshot using ONTAP CLI-based Snapshot File Restore. + * + *

Instead of restoring the entire FlexVolume to a snapshot (which would affect + * other VMs/files on the same FlexVol), this method restores only the individual + * files or LUNs belonging to this VM using the dedicated ONTAP CLI snapshot file + * restore API:

+ * + *

{@code POST /api/private/cli/volume/snapshot/restore-file}

+ * + *

For each persisted detail row (one per CloudStack volume):

+ *
    + *
  • NFS: restores {@code } from the snapshot to the live volume
  • + *
  • iSCSI: restores {@code } from the snapshot to the live volume
  • + *
+ */ + void revertFlexVolSnapshots(List flexVolDetails) { + for (VMSnapshotDetailsVO detailVO : flexVolDetails) { + FlexVolSnapshotDetail detail = FlexVolSnapshotDetail.parse(detailVO.getValue()); + + if (detail.volumePath == null || detail.volumePath.isEmpty()) { + // Legacy detail row without volumePath – cannot do single-file restore + logger.warn("revertFlexVolSnapshots: FlexVol snapshot detail for FlexVol [{}] has no volumePath (legacy format). " + + "Skipping single-file restore for this entry.", detail.flexVolUuid); + continue; + } + + Map poolDetails = storagePoolDetailsDao.listDetailsKeyPairs(detail.poolId); + StorageStrategy storageStrategy = OntapStorageUtils.getStrategyByStoragePoolDetails(poolDetails); + SnapshotFeignClient snapshotClient = storageStrategy.getSnapshotFeignClient(); + String authHeader = storageStrategy.getAuthHeader(); + + // Get SVM name and FlexVolume name from pool details + String svmName = poolDetails.get(OntapStorageConstants.SVM_NAME); + String flexVolName = poolDetails.get(OntapStorageConstants.VOLUME_NAME); + + if (svmName == null || svmName.isEmpty()) { + throw new CloudRuntimeException("SVM name not found in pool details for pool [" + detail.poolId + "]"); + } + if (flexVolName == null || flexVolName.isEmpty()) { + throw new CloudRuntimeException("FlexVolume name not found in pool details for pool [" + detail.poolId + "]"); + } + + // The path must start with "/" for the ONTAP CLI API + String ontapFilePath = detail.volumePath.startsWith("/") ? 
detail.volumePath : "/" + detail.volumePath; + + logger.info("revertFlexVolSnapshots: Restoring volume [{}] from FlexVol snapshot [{}] on FlexVol [{}] (protocol={})", + ontapFilePath, detail.snapshotName, flexVolName, detail.protocol); + + // Use CLI-based restore API: POST /api/private/cli/volume/snapshot/restore-file + CliSnapshotRestoreRequest restoreRequest = new CliSnapshotRestoreRequest( + svmName, flexVolName, detail.snapshotName, ontapFilePath); + + JobResponse jobResponse = snapshotClient.restoreFileFromSnapshotCli(authHeader, restoreRequest); + + if (jobResponse != null && jobResponse.getJob() != null) { + Boolean success = storageStrategy.jobPollForSuccess(jobResponse.getJob().getUuid(), 60, 2); + if (!success) { + throw new CloudRuntimeException("Snapshot file restore failed for volume path [" + + ontapFilePath + "] from snapshot [" + detail.snapshotName + + "] on FlexVol [" + flexVolName + "]"); + } + } + + logger.info("revertFlexVolSnapshots: Successfully restored volume [{}] from snapshot [{}] on FlexVol [{}]", + ontapFilePath, detail.snapshotName, flexVolName); + } + } + + // ────────────────────────────────────────────────────────────────────────── + // Inner classes for grouping & detail tracking + // ────────────────────────────────────────────────────────────────────────── + + /** + * Groups information about volumes that share the same FlexVolume. + */ + static class FlexVolGroupInfo { + final Map poolDetails; + final long poolId; + final List volumeIds = new ArrayList<>(); + + FlexVolGroupInfo(Map poolDetails, long poolId) { + this.poolDetails = poolDetails; + this.poolId = poolId; + } + } + + /** + * Holds the metadata for a single volume's FlexVolume snapshot entry (used during create and for + * serialization/deserialization to/from vm_snapshot_details). + * + *

One row is persisted per CloudStack volume. Multiple volumes may share the same + * FlexVol snapshot (if they reside on the same FlexVolume).

+ * + *

Serialized format: {@code "::::::::::"}

+ */ + static class FlexVolSnapshotDetail { + final String flexVolUuid; + final String snapshotUuid; + final String snapshotName; + /** The ONTAP-side path of the file or LUN within the FlexVolume (e.g. "uuid.qcow2" for NFS, "/vol/vol1/lun1" for iSCSI). */ + final String volumePath; + final long poolId; + /** Storage protocol: NFS3, ISCSI, etc. */ + final String protocol; + + FlexVolSnapshotDetail(String flexVolUuid, String snapshotUuid, String snapshotName, + String volumePath, long poolId, String protocol) { + this.flexVolUuid = flexVolUuid; + this.snapshotUuid = snapshotUuid; + this.snapshotName = snapshotName; + this.volumePath = volumePath; + this.poolId = poolId; + this.protocol = protocol; + } + + /** + * Parses a vm_snapshot_details value string back into a FlexVolSnapshotDetail. + */ + static FlexVolSnapshotDetail parse(String value) { + String[] parts = value.split(DETAIL_SEPARATOR); + if (parts.length == 4) { + // Legacy format without volumePath and protocol: flexVolUuid::snapshotUuid::snapshotName::poolId + return new FlexVolSnapshotDetail(parts[0], parts[1], parts[2], null, Long.parseLong(parts[3]), null); + } + if (parts.length != 6) { + throw new CloudRuntimeException("Invalid ONTAP FlexVol snapshot detail format: " + value); + } + return new FlexVolSnapshotDetail(parts[0], parts[1], parts[2], parts[3], Long.parseLong(parts[4]), parts[5]); + } + + @Override + public String toString() { + return flexVolUuid + DETAIL_SEPARATOR + snapshotUuid + DETAIL_SEPARATOR + snapshotName + + DETAIL_SEPARATOR + volumePath + DETAIL_SEPARATOR + poolId + DETAIL_SEPARATOR + protocol; + } + } +} diff --git a/plugins/storage/volume/ontap/src/main/resources/META-INF/cloudstack/storage-volume-ontap/spring-storage-volume-ontap-context.xml b/plugins/storage/volume/ontap/src/main/resources/META-INF/cloudstack/storage-volume-ontap/spring-storage-volume-ontap-context.xml index 6ab9c46fcf9d..bb907871469c 100644 --- 
a/plugins/storage/volume/ontap/src/main/resources/META-INF/cloudstack/storage-volume-ontap/spring-storage-volume-ontap-context.xml +++ b/plugins/storage/volume/ontap/src/main/resources/META-INF/cloudstack/storage-volume-ontap/spring-storage-volume-ontap-context.xml @@ -30,4 +30,7 @@ + + diff --git a/plugins/storage/volume/ontap/src/test/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriverTest.java b/plugins/storage/volume/ontap/src/test/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriverTest.java new file mode 100644 index 000000000000..68fd40d5b7f1 --- /dev/null +++ b/plugins/storage/volume/ontap/src/test/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriverTest.java @@ -0,0 +1,571 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.storage.driver; + +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.host.Host; +import com.cloud.host.HostVO; +import com.cloud.storage.ScopeType; +import com.cloud.storage.Storage; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.VolumeDetailVO; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.storage.dao.VolumeDetailsDao; +import com.cloud.utils.exception.CloudRuntimeException; +import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.framework.async.AsyncCompletionCallback; +import org.apache.cloudstack.storage.command.CommandResult; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.feign.model.Igroup; +import org.apache.cloudstack.storage.feign.model.Lun; +import org.apache.cloudstack.storage.service.UnifiedSANStrategy; +import org.apache.cloudstack.storage.service.model.AccessGroup; +import org.apache.cloudstack.storage.service.model.CloudStackVolume; +import org.apache.cloudstack.storage.service.model.ProtocolType; +import org.apache.cloudstack.storage.utils.OntapStorageConstants; +import org.apache.cloudstack.storage.utils.OntapStorageUtils; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.ArgumentCaptor; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.MockedStatic; +import org.mockito.junit.jupiter.MockitoExtension; + +import java.util.HashMap; +import java.util.Map; + +import static com.cloud.agent.api.to.DataObjectType.VOLUME; +import static 
org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.argThat; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.mockStatic; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +@ExtendWith(MockitoExtension.class) +class OntapPrimaryDatastoreDriverTest { + + @Mock + private StoragePoolDetailsDao storagePoolDetailsDao; + + @Mock + private PrimaryDataStoreDao storagePoolDao; + + @Mock + private VolumeDao volumeDao; + + @Mock + private VolumeDetailsDao volumeDetailsDao; + + @Mock + private DataStore dataStore; + + @Mock + private VolumeInfo volumeInfo; + + @Mock + private StoragePoolVO storagePool; + + @Mock + private VolumeVO volumeVO; + + @Mock + private Host host; + + @Mock + private UnifiedSANStrategy sanStrategy; + + @Mock + private AsyncCompletionCallback createCallback; + + @Mock + private AsyncCompletionCallback commandCallback; + + @InjectMocks + private OntapPrimaryDatastoreDriver driver; + + private Map storagePoolDetails; + + @BeforeEach + void setUp() { + storagePoolDetails = new HashMap<>(); + storagePoolDetails.put(OntapStorageConstants.PROTOCOL, ProtocolType.ISCSI.name()); + storagePoolDetails.put(OntapStorageConstants.SVM_NAME, "svm1"); + } + + @Test + void testGetCapabilities() { + Map capabilities = driver.getCapabilities(); + + assertNotNull(capabilities); + // With SIS clone approach, driver advertises storage system 
snapshot capability + // so StorageSystemSnapshotStrategy handles snapshot backup to secondary storage + assertEquals(Boolean.TRUE.toString(), capabilities.get("STORAGE_SYSTEM_SNAPSHOT")); + assertEquals(Boolean.TRUE.toString(), capabilities.get("CAN_CREATE_VOLUME_FROM_SNAPSHOT")); + } + + @Test + void testCreateAsync_NullDataObject_ThrowsException() { + assertThrows(InvalidParameterValueException.class, + () -> driver.createAsync(dataStore, null, createCallback)); + } + + @Test + void testCreateAsync_NullDataStore_ThrowsException() { + assertThrows(InvalidParameterValueException.class, + () -> driver.createAsync(null, volumeInfo, createCallback)); + } + + @Test + void testCreateAsync_NullCallback_ThrowsException() { + assertThrows(InvalidParameterValueException.class, + () -> driver.createAsync(dataStore, volumeInfo, null)); + } + + @Test + void testCreateAsync_VolumeWithISCSI_Success() { + // Setup + when(dataStore.getId()).thenReturn(1L); + when(dataStore.getUuid()).thenReturn("pool-uuid-123"); + when(dataStore.getName()).thenReturn("ontap-pool"); + when(volumeInfo.getType()).thenReturn(VOLUME); + when(volumeInfo.getId()).thenReturn(100L); + when(volumeInfo.getName()).thenReturn("test-volume"); + + when(storagePoolDao.findById(1L)).thenReturn(storagePool); + when(storagePool.getId()).thenReturn(1L); + when(storagePool.getPoolType()).thenReturn(Storage.StoragePoolType.NetworkFilesystem); + + when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(storagePoolDetails); + when(volumeDao.findById(100L)).thenReturn(volumeVO); + when(volumeVO.getId()).thenReturn(100L); + + Lun mockLun = new Lun(); + mockLun.setName("/vol/vol1/lun1"); + mockLun.setUuid("lun-uuid-123"); + // Create request volume (returned by Utility.createCloudStackVolumeRequestByProtocol) + CloudStackVolume requestVolume = new CloudStackVolume(); + requestVolume.setLun(mockLun); + // Create response volume (returned by sanStrategy.createCloudStackVolume) + CloudStackVolume responseVolume = new 
CloudStackVolume(); + responseVolume.setLun(mockLun); + + try (MockedStatic utilityMock = mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.getStrategyByStoragePoolDetails(any())) + .thenReturn(sanStrategy); + utilityMock.when(() -> OntapStorageUtils.createCloudStackVolumeRequestByProtocol( + any(), any(), any())).thenReturn(requestVolume); + when(sanStrategy.createCloudStackVolume(any())).thenReturn(responseVolume); + + // Execute + driver.createAsync(dataStore, volumeInfo, createCallback); + + // Verify + ArgumentCaptor resultCaptor = ArgumentCaptor.forClass(CreateCmdResult.class); + verify(createCallback).complete(resultCaptor.capture()); + + CreateCmdResult result = resultCaptor.getValue(); + assertNotNull(result); + assertTrue(result.isSuccess()); + + verify(volumeDetailsDao).addDetail(eq(100L), eq(OntapStorageConstants.LUN_DOT_UUID), eq("lun-uuid-123"), eq(false)); + verify(volumeDetailsDao).addDetail(eq(100L), eq(OntapStorageConstants.LUN_DOT_NAME), eq("/vol/vol1/lun1"), eq(false)); + verify(volumeDao).update(eq(100L), any(VolumeVO.class)); + } + } + + @Test + void testCreateAsync_VolumeWithNFS_Success() { + // Setup + storagePoolDetails.put(OntapStorageConstants.PROTOCOL, ProtocolType.NFS3.name()); + + when(dataStore.getId()).thenReturn(1L); + when(dataStore.getUuid()).thenReturn("pool-uuid-123"); + when(dataStore.getName()).thenReturn("ontap-pool"); + when(volumeInfo.getType()).thenReturn(VOLUME); + when(volumeInfo.getId()).thenReturn(100L); + when(volumeInfo.getName()).thenReturn("test-volume"); + + when(storagePoolDao.findById(1L)).thenReturn(storagePool); + when(storagePool.getId()).thenReturn(1L); + when(storagePool.getPoolType()).thenReturn(Storage.StoragePoolType.NetworkFilesystem); + when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(storagePoolDetails); + when(volumeDao.findById(100L)).thenReturn(volumeVO); + when(volumeVO.getId()).thenReturn(100L); + + CloudStackVolume mockCloudStackVolume = new 
CloudStackVolume(); + + try (MockedStatic utilityMock = mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.getStrategyByStoragePoolDetails(storagePoolDetails)) + .thenReturn(sanStrategy); + utilityMock.when(() -> OntapStorageUtils.createCloudStackVolumeRequestByProtocol( + any(), any(), any())).thenReturn(mockCloudStackVolume); + + when(sanStrategy.createCloudStackVolume(any())).thenReturn(mockCloudStackVolume); + + // Execute + driver.createAsync(dataStore, volumeInfo, createCallback); + + // Verify + ArgumentCaptor resultCaptor = ArgumentCaptor.forClass(CreateCmdResult.class); + verify(createCallback).complete(resultCaptor.capture()); + + CreateCmdResult result = resultCaptor.getValue(); + assertNotNull(result); + assertTrue(result.isSuccess()); + verify(volumeDao).update(eq(100L), any(VolumeVO.class)); + } + } + + @Test + void testDeleteAsync_NullStore_ThrowsException() { + ArgumentCaptor resultCaptor = ArgumentCaptor.forClass(CommandResult.class); + + driver.deleteAsync(null, volumeInfo, commandCallback); + + verify(commandCallback).complete(resultCaptor.capture()); + CommandResult result = resultCaptor.getValue(); + assertFalse(result.isSuccess()); + assertTrue(result.getResult().contains("store or data is null")); + } + + @Test + void testDeleteAsync_ISCSIVolume_Success() { + // Setup + when(dataStore.getId()).thenReturn(1L); + when(volumeInfo.getType()).thenReturn(VOLUME); + when(volumeInfo.getId()).thenReturn(100L); + + when(storagePoolDao.findById(1L)).thenReturn(storagePool); + when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(storagePoolDetails); + + VolumeDetailVO lunNameDetail = new VolumeDetailVO(100L, OntapStorageConstants.LUN_DOT_NAME, "/vol/vol1/lun1", false); + VolumeDetailVO lunUuidDetail = new VolumeDetailVO(100L, OntapStorageConstants.LUN_DOT_UUID, "lun-uuid-123", false); + + when(volumeDetailsDao.findDetail(100L, OntapStorageConstants.LUN_DOT_NAME)).thenReturn(lunNameDetail); + 
when(volumeDetailsDao.findDetail(100L, OntapStorageConstants.LUN_DOT_UUID)).thenReturn(lunUuidDetail); + + try (MockedStatic utilityMock = mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.getStrategyByStoragePoolDetails(storagePoolDetails)) + .thenReturn(sanStrategy); + + doNothing().when(sanStrategy).deleteCloudStackVolume(any()); + + // Execute + driver.deleteAsync(dataStore, volumeInfo, commandCallback); + + // Verify + ArgumentCaptor resultCaptor = ArgumentCaptor.forClass(CommandResult.class); + verify(commandCallback).complete(resultCaptor.capture()); + + CommandResult result = resultCaptor.getValue(); + assertNotNull(result); + assertTrue(result.isSuccess()); + verify(sanStrategy).deleteCloudStackVolume(any(CloudStackVolume.class)); + } + } + + @Test + void testDeleteAsync_NFSVolume_Success() { + // Setup + storagePoolDetails.put(OntapStorageConstants.PROTOCOL, ProtocolType.NFS3.name()); + + when(dataStore.getId()).thenReturn(1L); + when(volumeInfo.getType()).thenReturn(VOLUME); + + when(storagePoolDao.findById(1L)).thenReturn(storagePool); + when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(storagePoolDetails); + + // Execute + driver.deleteAsync(dataStore, volumeInfo, commandCallback); + + // Verify + ArgumentCaptor resultCaptor = ArgumentCaptor.forClass(CommandResult.class); + verify(commandCallback).complete(resultCaptor.capture()); + + CommandResult result = resultCaptor.getValue(); + assertNotNull(result); + // NFS deletion doesn't fail, handled by hypervisor + } + + @Test + void testGrantAccess_NullParameters_ThrowsException() { + assertThrows(CloudRuntimeException.class, + () -> driver.grantAccess(null, host, dataStore)); + + assertThrows(CloudRuntimeException.class, + () -> driver.grantAccess(volumeInfo, null, dataStore)); + + assertThrows(CloudRuntimeException.class, + () -> driver.grantAccess(volumeInfo, host, null)); + } + + @Test + void testGrantAccess_ClusterScope_Success() { + // Setup + 
when(dataStore.getId()).thenReturn(1L); + when(dataStore.getUuid()).thenReturn("pool-uuid-123"); + when(volumeInfo.getType()).thenReturn(VOLUME); + when(volumeInfo.getId()).thenReturn(100L); + + when(storagePoolDao.findById(1L)).thenReturn(storagePool); + when(storagePool.getId()).thenReturn(1L); + when(storagePool.getScope()).thenReturn(ScopeType.CLUSTER); + when(storagePool.getPath()).thenReturn("iqn.1992-08.com.netapp:sn.123456"); + when(storagePool.getPoolType()).thenReturn(Storage.StoragePoolType.NetworkFilesystem); + + when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(storagePoolDetails); + when(volumeDao.findById(100L)).thenReturn(volumeVO); + when(volumeVO.getId()).thenReturn(100L); + + when(host.getName()).thenReturn("host1"); + + VolumeDetailVO lunNameDetail = new VolumeDetailVO(100L, OntapStorageConstants.LUN_DOT_NAME, "/vol/vol1/lun1", false); + when(volumeDetailsDao.findDetail(100L, OntapStorageConstants.LUN_DOT_NAME)).thenReturn(lunNameDetail); + + // Mock AccessGroup with existing igroup + AccessGroup existingAccessGroup = new AccessGroup(); + Igroup existingIgroup = new Igroup(); + existingIgroup.setName("igroup1"); + existingAccessGroup.setIgroup(existingIgroup); + + try (MockedStatic utilityMock = mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.getStrategyByStoragePoolDetails(storagePoolDetails)) + .thenReturn(sanStrategy); + utilityMock.when(() -> OntapStorageUtils.getIgroupName(anyString(), anyString())) + .thenReturn("igroup1"); + + when(sanStrategy.getAccessGroup(any())).thenReturn(existingAccessGroup); + when(sanStrategy.ensureLunMapped(anyString(), anyString(), anyString())).thenReturn("0"); + + // Execute + boolean result = driver.grantAccess(volumeInfo, host, dataStore); + + // Verify + assertTrue(result); + verify(volumeDao).update(eq(100L), any(VolumeVO.class)); + verify(sanStrategy).getAccessGroup(any()); + verify(sanStrategy).ensureLunMapped(anyString(), anyString(), anyString()); + 
verify(sanStrategy, never()).validateInitiatorInAccessGroup(anyString(), anyString(), any(Igroup.class)); + } + } + + @Test + void testGrantAccess_IgroupNotFound_CreatesNewIgroup() { + // Setup - use HostVO mock since production code casts Host to HostVO + HostVO hostVO = mock(HostVO.class); + when(hostVO.getName()).thenReturn("host1"); + + when(dataStore.getId()).thenReturn(1L); + when(dataStore.getUuid()).thenReturn("pool-uuid-123"); + when(volumeInfo.getType()).thenReturn(VOLUME); + when(volumeInfo.getId()).thenReturn(100L); + + when(storagePoolDao.findById(1L)).thenReturn(storagePool); + when(storagePool.getId()).thenReturn(1L); + when(storagePool.getScope()).thenReturn(ScopeType.CLUSTER); + when(storagePool.getPath()).thenReturn("iqn.1992-08.com.netapp:sn.123456"); + when(storagePool.getPoolType()).thenReturn(Storage.StoragePoolType.NetworkFilesystem); + + when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(storagePoolDetails); + when(volumeDao.findById(100L)).thenReturn(volumeVO); + when(volumeVO.getId()).thenReturn(100L); + + VolumeDetailVO lunNameDetail = new VolumeDetailVO(100L, OntapStorageConstants.LUN_DOT_NAME, "/vol/vol1/lun1", false); + when(volumeDetailsDao.findDetail(100L, OntapStorageConstants.LUN_DOT_NAME)).thenReturn(lunNameDetail); + + // Mock getAccessGroup returning null (igroup doesn't exist) + AccessGroup createdAccessGroup = new AccessGroup(); + Igroup createdIgroup = new Igroup(); + createdIgroup.setName("igroup1"); + createdAccessGroup.setIgroup(createdIgroup); + + try (MockedStatic utilityMock = mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.getStrategyByStoragePoolDetails(storagePoolDetails)) + .thenReturn(sanStrategy); + utilityMock.when(() -> OntapStorageUtils.getIgroupName(anyString(), anyString())) + .thenReturn("igroup1"); + + when(sanStrategy.getAccessGroup(any())).thenReturn(null); + when(sanStrategy.createAccessGroup(any())).thenReturn(createdAccessGroup); + 
when(sanStrategy.ensureLunMapped(anyString(), anyString(), anyString())).thenReturn("0"); + + // Execute + boolean result = driver.grantAccess(volumeInfo, hostVO, dataStore); + + // Verify + assertTrue(result); + verify(sanStrategy).getAccessGroup(any()); + verify(sanStrategy).createAccessGroup(any()); + verify(sanStrategy).ensureLunMapped(anyString(), anyString(), anyString()); + verify(volumeDao).update(eq(100L), any(VolumeVO.class)); + } + } + + @Test + void testRevokeAccess_NFSVolume_SkipsRevoke() { + // Setup - NFS volumes have no LUN mapping, so revokeAccess is a no-op + when(dataStore.getId()).thenReturn(1L); + when(volumeInfo.getType()).thenReturn(VOLUME); + when(volumeInfo.getId()).thenReturn(100L); + + when(volumeDao.findById(100L)).thenReturn(volumeVO); + when(volumeVO.getId()).thenReturn(100L); + when(volumeVO.getName()).thenReturn("test-volume"); + + when(storagePoolDao.findById(1L)).thenReturn(storagePool); + when(storagePool.getId()).thenReturn(1L); + when(storagePool.getScope()).thenReturn(ScopeType.CLUSTER); + when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(storagePoolDetails); + when(host.getName()).thenReturn("host1"); + + try (MockedStatic utilityMock = mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.getStrategyByStoragePoolDetails(storagePoolDetails)) + .thenReturn(sanStrategy); + + // Execute - NFS has no iSCSI protocol, so revokeAccessForVolume does nothing + driver.revokeAccess(volumeInfo, host, dataStore); + + // Verify - no LUN unmap operations for NFS + verify(sanStrategy, never()).disableLogicalAccess(any()); + } + } + + @Test + void testRevokeAccess_ISCSIVolume_Success() { + // Setup + when(dataStore.getId()).thenReturn(1L); + when(volumeInfo.getType()).thenReturn(VOLUME); + when(volumeInfo.getId()).thenReturn(100L); + + when(volumeDao.findById(100L)).thenReturn(volumeVO); + when(volumeVO.getId()).thenReturn(100L); + when(volumeVO.getName()).thenReturn("test-volume"); + + 
when(storagePoolDao.findById(1L)).thenReturn(storagePool); + when(storagePool.getId()).thenReturn(1L); + when(storagePool.getScope()).thenReturn(ScopeType.CLUSTER); + when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(storagePoolDetails); + + when(host.getStorageUrl()).thenReturn("iqn.1993-08.org.debian:01:host1"); + when(host.getName()).thenReturn("host1"); + + VolumeDetailVO lunNameDetail = new VolumeDetailVO(100L, OntapStorageConstants.LUN_DOT_NAME, "/vol/vol1/lun1", false); + when(volumeDetailsDao.findDetail(100L, OntapStorageConstants.LUN_DOT_NAME)).thenReturn(lunNameDetail); + + Lun mockLun = new Lun(); + mockLun.setName("/vol/vol1/lun1"); + mockLun.setUuid("lun-uuid-123"); + CloudStackVolume mockCloudStackVolume = new CloudStackVolume(); + mockCloudStackVolume.setLun(mockLun); + + org.apache.cloudstack.storage.feign.model.Igroup mockIgroup = mock(org.apache.cloudstack.storage.feign.model.Igroup.class); + when(mockIgroup.getName()).thenReturn("igroup1"); + when(mockIgroup.getUuid()).thenReturn("igroup-uuid-123"); + AccessGroup mockAccessGroup = new AccessGroup(); + mockAccessGroup.setIgroup(mockIgroup); + + try (MockedStatic utilityMock = mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.getStrategyByStoragePoolDetails(storagePoolDetails)) + .thenReturn(sanStrategy); + utilityMock.when(() -> OntapStorageUtils.getIgroupName(anyString(), anyString())) + .thenReturn("igroup1"); + + // Mock the methods called by getCloudStackVolumeByName and getAccessGroupByName + when(sanStrategy.getCloudStackVolume(argThat(map -> + map != null && + "/vol/vol1/lun1".equals(map.get("name")) && + "svm1".equals(map.get("svm.name")) + ))).thenReturn(mockCloudStackVolume); + + when(sanStrategy.getAccessGroup(argThat(map -> + map != null && + "igroup1".equals(map.get("name")) && + "svm1".equals(map.get("svm.name")) + ))).thenReturn(mockAccessGroup); + + when(sanStrategy.validateInitiatorInAccessGroup( + eq("iqn.1993-08.org.debian:01:host1"), 
+ eq("svm1"), + any(Igroup.class) + )).thenReturn(true); + + doNothing().when(sanStrategy).disableLogicalAccess(argThat(map -> + map != null && + "lun-uuid-123".equals(map.get("lun.uuid")) && + "igroup-uuid-123".equals(map.get("igroup.uuid")) + )); + + // Execute + driver.revokeAccess(volumeInfo, host, dataStore); + + // Verify + verify(sanStrategy).getCloudStackVolume(any()); + verify(sanStrategy).getAccessGroup(any()); + verify(sanStrategy).validateInitiatorInAccessGroup(anyString(), anyString(), any(Igroup.class)); + verify(sanStrategy).disableLogicalAccess(any()); + } + } + + @Test + void testCanHostAccessStoragePool_ReturnsTrue() { + assertTrue(driver.canHostAccessStoragePool(host, storagePool)); + } + + @Test + void testIsVmInfoNeeded_ReturnsTrue() { + assertTrue(driver.isVmInfoNeeded()); + } + + @Test + void testIsStorageSupportHA_ReturnsTrue() { + assertTrue(driver.isStorageSupportHA(Storage.StoragePoolType.NetworkFilesystem)); + } + + @Test + void testGetChapInfo_ReturnsNull() { + assertNull(driver.getChapInfo(volumeInfo)); + } + + @Test + void testCanProvideStorageStats_ReturnsFalse() { + assertFalse(driver.canProvideStorageStats()); + } + + @Test + void testCanProvideVolumeStats_ReturnsFalse() { + assertFalse(driver.canProvideVolumeStats()); + } +} diff --git a/plugins/storage/volume/ontap/src/test/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycleTest.java b/plugins/storage/volume/ontap/src/test/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycleTest.java index 789615a9f43b..604ab400474c 100644 --- a/plugins/storage/volume/ontap/src/test/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycleTest.java +++ b/plugins/storage/volume/ontap/src/test/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycleTest.java @@ -18,6 +18,8 @@ */ package org.apache.cloudstack.storage.lifecycle; +import org.apache.cloudstack.storage.utils.OntapStorageConstants; +import 
org.apache.cloudstack.storage.utils.OntapStorageUtils; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; @@ -32,15 +34,35 @@ import com.cloud.dc.dao.ClusterDao; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.dc.ClusterVO; +import com.cloud.host.HostVO; +import com.cloud.resource.ResourceManager; +import com.cloud.storage.StorageManager; +import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; +import org.apache.cloudstack.storage.service.model.AccessGroup; +import com.cloud.hypervisor.Hypervisor; import java.util.Map; +import java.util.List; +import java.util.ArrayList; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.when; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.withSettings; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.assertFalse; import java.util.HashMap; import org.apache.cloudstack.storage.provider.StorageProviderFactory; import org.apache.cloudstack.storage.service.StorageStrategy; import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; @ExtendWith(MockitoExtension.class) @@ -58,8 +80,36 @@ public class OntapPrimaryDatastoreLifecycleTest { @Mock private 
PrimaryDataStoreHelper _dataStoreHelper; + @Mock + private ResourceManager _resourceMgr; + + @Mock + private StorageManager _storageMgr; + + @Mock + private StoragePoolDetailsDao storagePoolDetailsDao; + + @Mock + private PrimaryDataStoreDao storagePoolDao; + + // Mock object that implements both DataStore and PrimaryDataStoreInfo + // This is needed because attachCluster(DataStore) casts DataStore to PrimaryDataStoreInfo internally + private DataStore dataStore; + + @Mock + private ClusterScope clusterScope; + + @Mock + private ZoneScope zoneScope; + + private List mockHosts; + private Map poolDetails; + @BeforeEach void setUp() { + // Create a mock that implements both DataStore and PrimaryDataStoreInfo interfaces + dataStore = Mockito.mock(DataStore.class, withSettings() + .extraInterfaces(PrimaryDataStoreInfo.class)); ClusterVO clusterVO = new ClusterVO(1L, 1L, "clusterName"); clusterVO.setHypervisorType("KVM"); @@ -73,39 +123,49 @@ void setUp() { volume.setName("testVolume"); when(storageStrategy.createStorageVolume(any(), any())).thenReturn(volume); + // Setup for attachCluster tests + // Configure dataStore mock with necessary methods (works for both DataStore and PrimaryDataStoreInfo) + when(dataStore.getId()).thenReturn(1L); + when(((PrimaryDataStoreInfo) dataStore).getClusterId()).thenReturn(1L); + + // Mock the setDetails method to prevent NullPointerException + Mockito.doNothing().when(((PrimaryDataStoreInfo) dataStore)).setDetails(any()); + + // Mock storagePoolDao to return a valid StoragePoolVO + StoragePoolVO mockStoragePoolVO = new StoragePoolVO(); + mockStoragePoolVO.setId(1L); + when(storagePoolDao.findById(1L)).thenReturn(mockStoragePoolVO); + + mockHosts = new ArrayList<>(); + HostVO host1 = new HostVO("host1-guid"); + host1.setPrivateIpAddress("192.168.1.10"); + host1.setStorageIpAddress("192.168.1.10"); + host1.setClusterId(1L); + HostVO host2 = new HostVO("host2-guid"); + host2.setPrivateIpAddress("192.168.1.11"); + 
host2.setStorageIpAddress("192.168.1.11"); + host2.setClusterId(1L); + mockHosts.add(host1); + mockHosts.add(host2); + poolDetails = new HashMap<>(); + poolDetails.put("username", "admin"); + poolDetails.put("password", "password"); + poolDetails.put("svmName", "svm1"); + poolDetails.put("protocol", "NFS3"); + poolDetails.put("storageIP", "192.168.1.100"); } @Test public void testInitialize_positive() { - Map dsInfos = new HashMap<>(); - dsInfos.put("username", "testUser"); - dsInfos.put("password", "testPassword"); - dsInfos.put("url", "username=testUser;password=testPassword;svmName=testSVM;protocol=NFS3;managementLIF=192.168.1.1"); - dsInfos.put("zoneId",1L); - dsInfos.put("podId",1L); - dsInfos.put("clusterId", 1L); - dsInfos.put("name", "testStoragePool"); - dsInfos.put("providerName", "testProvider"); - dsInfos.put("capacityBytes",200000L); - dsInfos.put("managed",true); - dsInfos.put("tags", "testTag"); - dsInfos.put("isTagARule", false); - dsInfos.put("details", new HashMap()); - - try(MockedStatic storageProviderFactory = Mockito.mockStatic(StorageProviderFactory.class)) { - storageProviderFactory.when(() -> StorageProviderFactory.getStrategy(any())).thenReturn(storageStrategy); - ontapPrimaryDatastoreLifecycle.initialize(dsInfos); - } - } - - @Test - public void testInitialize_positiveWithIsDisaggregated() { + HashMap detailsMap = new HashMap(); + detailsMap.put(OntapStorageConstants.USERNAME, "testUser"); + detailsMap.put(OntapStorageConstants.PASSWORD, "testPassword"); + detailsMap.put(OntapStorageConstants.STORAGE_IP, "10.10.10.10"); + detailsMap.put(OntapStorageConstants.SVM_NAME, "vs0"); + detailsMap.put(OntapStorageConstants.PROTOCOL, "NFS3"); Map dsInfos = new HashMap<>(); - dsInfos.put("username", "testUser"); - dsInfos.put("password", "testPassword"); - dsInfos.put("url", "username=testUser;password=testPassword;svmName=testSVM;protocol=NFS3;managementLIF=192.168.1.1;isDisaggregated=false"); dsInfos.put("zoneId",1L); dsInfos.put("podId",1L); 
dsInfos.put("clusterId", 1L); @@ -115,7 +175,7 @@ public void testInitialize_positiveWithIsDisaggregated() { dsInfos.put("managed",true); dsInfos.put("tags", "testTag"); dsInfos.put("isTagARule", false); - dsInfos.put("details", new HashMap()); + dsInfos.put("details", detailsMap); try(MockedStatic storageProviderFactory = Mockito.mockStatic(StorageProviderFactory.class)) { storageProviderFactory.when(() -> StorageProviderFactory.getStrategy(any())).thenReturn(storageStrategy); @@ -132,8 +192,14 @@ public void testInitialize_null_Arg() { @Test public void testInitialize_missingRequiredDetailKey() { + + HashMap detailsMap = new HashMap(); + detailsMap.put(OntapStorageConstants.USERNAME, "testUser"); + detailsMap.put(OntapStorageConstants.PASSWORD, "testPassword"); + detailsMap.put(OntapStorageConstants.STORAGE_IP, "10.10.10.10"); + detailsMap.put(OntapStorageConstants.SVM_NAME, "vs0"); + Map dsInfos = new HashMap<>(); - dsInfos.put("url", "username=testUser;password=testPassword;svmName=testSVM;protocol=NFS3"); dsInfos.put("zoneId",1L); dsInfos.put("podId",1L); dsInfos.put("clusterId", 1L); @@ -143,7 +209,7 @@ public void testInitialize_missingRequiredDetailKey() { dsInfos.put("managed",true); dsInfos.put("tags", "testTag"); dsInfos.put("isTagARule", false); - dsInfos.put("details", new HashMap()); + dsInfos.put("details", detailsMap); try (MockedStatic storageProviderFactory = Mockito.mockStatic(StorageProviderFactory.class)) { storageProviderFactory.when(() -> StorageProviderFactory.getStrategy(any())).thenReturn(storageStrategy); @@ -154,8 +220,15 @@ public void testInitialize_missingRequiredDetailKey() { @Test public void testInitialize_invalidCapacityBytes() { + + HashMap detailsMap = new HashMap(); + detailsMap.put(OntapStorageConstants.USERNAME, "testUser"); + detailsMap.put(OntapStorageConstants.PASSWORD, "testPassword"); + detailsMap.put(OntapStorageConstants.STORAGE_IP, "10.10.10.10"); + detailsMap.put(OntapStorageConstants.SVM_NAME, "vs0"); + 
detailsMap.put(OntapStorageConstants.PROTOCOL, "NFS3"); + Map dsInfos = new HashMap<>(); - dsInfos.put("url", "username=testUser;password=testPassword;svmName=testSVM;protocol=NFS3;managementLIF=192.168.1.1"); dsInfos.put("zoneId",1L); dsInfos.put("podId",1L); dsInfos.put("clusterId", 1L); @@ -165,7 +238,7 @@ public void testInitialize_invalidCapacityBytes() { dsInfos.put("managed",true); dsInfos.put("tags", "testTag"); dsInfos.put("isTagARule", false); - dsInfos.put("details", new HashMap()); + dsInfos.put("details", detailsMap); try (MockedStatic storageProviderFactory = Mockito.mockStatic(StorageProviderFactory.class)) { storageProviderFactory.when(() -> StorageProviderFactory.getStrategy(any())).thenReturn(storageStrategy); @@ -176,7 +249,6 @@ public void testInitialize_invalidCapacityBytes() { @Test public void testInitialize_unmanagedStorage() { Map dsInfos = new HashMap<>(); - dsInfos.put("url", "username=testUser;password=testPassword;svmName=testSVM;protocol=NFS3;managementLIF=192.168.1.1"); dsInfos.put("zoneId",1L); dsInfos.put("podId",1L); dsInfos.put("clusterId", 1L); @@ -200,7 +272,6 @@ public void testInitialize_unmanagedStorage() { @Test public void testInitialize_nullStoragePoolName() { Map dsInfos = new HashMap<>(); - dsInfos.put("url", "username=testUser;password=testPassword;svmName=testSVM;protocol=NFS3;managementLIF=192.168.1.1"); dsInfos.put("zoneId",1L); dsInfos.put("podId",1L); dsInfos.put("clusterId", 1L); @@ -224,7 +295,6 @@ public void testInitialize_nullStoragePoolName() { @Test public void testInitialize_nullProviderName() { Map dsInfos = new HashMap<>(); - dsInfos.put("url", "username=testUser;password=testPassword;svmName=testSVM;protocol=NFS3;managementLIF=192.168.1.1"); dsInfos.put("zoneId",1L); dsInfos.put("podId",1L); dsInfos.put("clusterId", 1L); @@ -248,7 +318,6 @@ public void testInitialize_nullProviderName() { @Test public void testInitialize_nullPodAndClusterAndZone() { Map dsInfos = new HashMap<>(); - dsInfos.put("url", 
"username=testUser;password=testPassword;svmName=testSVM;protocol=NFS3;managementLIF=192.168.1.1"); dsInfos.put("zoneId",null); dsInfos.put("podId",null); dsInfos.put("clusterId", null); @@ -276,7 +345,6 @@ public void testInitialize_clusterNotKVM() { when(_clusterDao.findById(2L)).thenReturn(clusterVO); Map dsInfos = new HashMap<>(); - dsInfos.put("url", "username=testUser;password=testPassword;svmName=testSVM;protocol=NFS3;managementLIF=192.168.1.1"); dsInfos.put("zoneId",1L); dsInfos.put("podId",1L); dsInfos.put("clusterId", 2L); @@ -299,8 +367,16 @@ public void testInitialize_clusterNotKVM() { @Test public void testInitialize_unexpectedDetailKey() { + + HashMap detailsMap = new HashMap(); + detailsMap.put(OntapStorageConstants.USERNAME, "testUser"); + detailsMap.put(OntapStorageConstants.PASSWORD, "testPassword"); + detailsMap.put(OntapStorageConstants.STORAGE_IP, "10.10.10.10"); + detailsMap.put(OntapStorageConstants.SVM_NAME, "vs0"); + detailsMap.put(OntapStorageConstants.PROTOCOL, "NFS3"); + detailsMap.put("unexpectedKey", "unexpectedValue"); + Map dsInfos = new HashMap<>(); - dsInfos.put("url", "username=testUser;password=testPassword;svmName=testSVM;protocol=NFS3;managementLIF=192.168.1.1;unexpectedKey=unexpectedValue"); dsInfos.put("zoneId",1L); dsInfos.put("podId",1L); dsInfos.put("clusterId", 1L); @@ -310,7 +386,7 @@ public void testInitialize_unexpectedDetailKey() { dsInfos.put("managed",true); dsInfos.put("tags", "testTag"); dsInfos.put("isTagARule", false); - dsInfos.put("details", new HashMap()); + dsInfos.put("details", detailsMap); Exception ex = assertThrows(CloudRuntimeException.class, () -> { try (MockedStatic storageProviderFactory = Mockito.mockStatic(StorageProviderFactory.class)) { @@ -321,4 +397,409 @@ public void testInitialize_unexpectedDetailKey() { assertTrue(ex.getMessage().contains("Unexpected ONTAP detail key in URL")); } + // ========== attachCluster Tests ========== + + @Test + public void testAttachCluster_positive() throws 
Exception { + // Setup + when(_resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(any())) + .thenReturn(mockHosts); + when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(poolDetails); + when(_dataStoreHelper.attachCluster(any(DataStore.class))).thenReturn(dataStore); + + try (MockedStatic utilityMock = Mockito.mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.getStrategyByStoragePoolDetails(any())) + .thenReturn(storageStrategy); + when(storageStrategy.createAccessGroup(any(AccessGroup.class))).thenReturn(null); + + // Mock successful host connections + when(_storageMgr.connectHostToSharedPool(any(HostVO.class), anyLong())).thenReturn(true); + + // Execute + boolean result = ontapPrimaryDatastoreLifecycle.attachCluster( + dataStore, clusterScope); + + // Verify + assertTrue(result, "attachCluster should return true on success"); + verify(_resourceMgr, times(1)) + .getEligibleUpAndEnabledHostsInClusterForStorageConnection(any()); + verify(storagePoolDetailsDao, times(1)).listDetailsKeyPairs(1L); + verify(storageStrategy, times(1)).createAccessGroup(any(AccessGroup.class)); + verify(_storageMgr, times(2)).connectHostToSharedPool(any(HostVO.class), eq(1L)); + verify(_dataStoreHelper, times(1)).attachCluster(any(DataStore.class)); + } + } + + @Test + public void testAttachCluster_withSingleHost() throws Exception { + // Setup - only one host in cluster + List singleHost = new ArrayList<>(); + singleHost.add(mockHosts.get(0)); + + when(_resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(any())) + .thenReturn(singleHost); + when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(poolDetails); + when(_dataStoreHelper.attachCluster(any(DataStore.class))).thenReturn(dataStore); + + try (MockedStatic utilityMock = Mockito.mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.getStrategyByStoragePoolDetails(any())) + .thenReturn(storageStrategy); + 
when(storageStrategy.createAccessGroup(any(AccessGroup.class))).thenReturn(null); + when(_storageMgr.connectHostToSharedPool(any(HostVO.class), anyLong())).thenReturn(true); + + // Execute + boolean result = ontapPrimaryDatastoreLifecycle.attachCluster( + dataStore, clusterScope); + + // Verify + assertTrue(result, "attachCluster should return true with single host"); + verify(_storageMgr, times(1)).connectHostToSharedPool(any(HostVO.class), eq(1L)); + verify(_dataStoreHelper, times(1)).attachCluster(any(DataStore.class)); + } + } + + @Test + public void testAttachCluster_withMultipleHosts() throws Exception { + // Setup - add more hosts + HostVO host3 = new HostVO("host3-guid"); + host3.setPrivateIpAddress("192.168.1.12"); + host3.setStorageIpAddress("192.168.1.12"); + host3.setClusterId(1L); + mockHosts.add(host3); + + when(_resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(any())) + .thenReturn(mockHosts); + when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(poolDetails); + when(_dataStoreHelper.attachCluster(any(DataStore.class))).thenReturn(dataStore); + + try (MockedStatic utilityMock = Mockito.mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.getStrategyByStoragePoolDetails(any())) + .thenReturn(storageStrategy); + when(storageStrategy.createAccessGroup(any(AccessGroup.class))).thenReturn(null); + when(_storageMgr.connectHostToSharedPool(any(HostVO.class), anyLong())).thenReturn(true); + + // Execute + boolean result = ontapPrimaryDatastoreLifecycle.attachCluster( + dataStore, clusterScope); + + // Verify + assertTrue(result, "attachCluster should return true with multiple hosts"); + verify(_storageMgr, times(3)).connectHostToSharedPool(any(HostVO.class), eq(1L)); + verify(_dataStoreHelper, times(1)).attachCluster(any(DataStore.class)); + } + } + + @Test + public void testAttachCluster_hostConnectionFailure() throws Exception { + // Setup + 
when(_resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(any())) + .thenReturn(mockHosts); + when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(poolDetails); + + try (MockedStatic utilityMock = Mockito.mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.getStrategyByStoragePoolDetails(any())) + .thenReturn(storageStrategy); + when(storageStrategy.createAccessGroup(any(AccessGroup.class))).thenReturn(null); + + // Mock host connection failure for first host + when(_storageMgr.connectHostToSharedPool(any(HostVO.class), anyLong())) + .thenThrow(new CloudRuntimeException("Connection failed")); + + // Execute + boolean result = ontapPrimaryDatastoreLifecycle.attachCluster( + dataStore, clusterScope); + + // Verify + assertFalse(result, "attachCluster should return false on host connection failure"); + verify(storageStrategy, times(1)).createAccessGroup(any(AccessGroup.class)); + verify(_storageMgr, times(1)).connectHostToSharedPool(any(HostVO.class), eq(1L)); + // _dataStoreHelper.attachCluster should NOT be called due to early return + verify(_dataStoreHelper, times(0)).attachCluster(any(DataStore.class)); + } + } + + @Test + public void testAttachCluster_emptyHostList() throws Exception { + // Setup - no hosts in cluster + List emptyHosts = new ArrayList<>(); + + when(_resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(any())) + .thenReturn(emptyHosts); + when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(poolDetails); + when(_dataStoreHelper.attachCluster(any(DataStore.class))).thenReturn(dataStore); + + try (MockedStatic utilityMock = Mockito.mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.getStrategyByStoragePoolDetails(any())) + .thenReturn(storageStrategy); + when(storageStrategy.createAccessGroup(any(AccessGroup.class))).thenReturn(null); + + // Execute + boolean result = ontapPrimaryDatastoreLifecycle.attachCluster( + dataStore, 
clusterScope); + + // Verify + assertTrue(result, "attachCluster should return true even with no hosts"); + verify(_storageMgr, times(0)).connectHostToSharedPool(any(HostVO.class), anyLong()); + verify(_dataStoreHelper, times(1)).attachCluster(any(DataStore.class)); + } + } + + @Test + public void testAttachCluster_secondHostConnectionFails() throws Exception { + // Setup + when(_resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(any())) + .thenReturn(mockHosts); + when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(poolDetails); + + try (MockedStatic utilityMock = Mockito.mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.getStrategyByStoragePoolDetails(any())) + .thenReturn(storageStrategy); + when(storageStrategy.createAccessGroup(any(AccessGroup.class))).thenReturn(null); + + // Mock: first host succeeds, second host fails + when(_storageMgr.connectHostToSharedPool(any(HostVO.class), anyLong())) + .thenReturn(true) + .thenThrow(new CloudRuntimeException("Connection failed")); + + // Execute + boolean result = ontapPrimaryDatastoreLifecycle.attachCluster( + dataStore, clusterScope); + + // Verify + assertFalse(result, "attachCluster should return false when any host connection fails"); + verify(_storageMgr, times(2)).connectHostToSharedPool(any(HostVO.class), eq(1L)); + verify(_dataStoreHelper, times(0)).attachCluster(any(DataStore.class)); + } + } + + @Test + public void testAttachCluster_createAccessGroupCalled() throws Exception { + // Setup + when(_resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(any())) + .thenReturn(mockHosts); + when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(poolDetails); + when(_dataStoreHelper.attachCluster(any(DataStore.class))).thenReturn(dataStore); + + try (MockedStatic utilityMock = Mockito.mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.getStrategyByStoragePoolDetails(any())) + 
.thenReturn(storageStrategy); + when(storageStrategy.createAccessGroup(any(AccessGroup.class))).thenReturn(null); + when(_storageMgr.connectHostToSharedPool(any(HostVO.class), anyLong())).thenReturn(true); + + // Execute + boolean result = ontapPrimaryDatastoreLifecycle.attachCluster( + dataStore, clusterScope); + + // Verify - createAccessGroup is called with correct AccessGroup structure + assertTrue(result); + verify(storageStrategy, times(1)).createAccessGroup(any(AccessGroup.class)); + } + } + + // ========== attachZone Tests ========== + + @Test + public void testAttachZone_positive() throws Exception { + // Setup + when(zoneScope.getScopeId()).thenReturn(1L); + when(_resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(any(), eq(1L), eq(Hypervisor.HypervisorType.KVM))) + .thenReturn(mockHosts); + when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(poolDetails); + when(_dataStoreHelper.attachZone(any(DataStore.class))).thenReturn(dataStore); + + try (MockedStatic utilityMock = Mockito.mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.getStrategyByStoragePoolDetails(any())) + .thenReturn(storageStrategy); + when(storageStrategy.createAccessGroup(any(AccessGroup.class))).thenReturn(null); + + // Mock successful host connections + when(_storageMgr.connectHostToSharedPool(any(HostVO.class), anyLong())).thenReturn(true); + + // Execute + boolean result = ontapPrimaryDatastoreLifecycle.attachZone( + dataStore, zoneScope, Hypervisor.HypervisorType.KVM); + + // Verify + assertTrue(result, "attachZone should return true on success"); + verify(_resourceMgr, times(1)) + .getEligibleUpAndEnabledHostsInZoneForStorageConnection(any(), eq(1L), eq(Hypervisor.HypervisorType.KVM)); + verify(storagePoolDetailsDao, times(1)).listDetailsKeyPairs(1L); + verify(storageStrategy, times(1)).createAccessGroup(any(AccessGroup.class)); + verify(_storageMgr, times(2)).connectHostToSharedPool(any(HostVO.class), eq(1L)); + 
verify(_dataStoreHelper, times(1)).attachZone(any(DataStore.class)); + } + } + + @Test + public void testAttachZone_withSingleHost() throws Exception { + // Setup - only one host in zone + List singleHost = new ArrayList<>(); + singleHost.add(mockHosts.get(0)); + + when(zoneScope.getScopeId()).thenReturn(1L); + when(_resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(any(), eq(1L), eq(Hypervisor.HypervisorType.KVM))) + .thenReturn(singleHost); + when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(poolDetails); + when(_dataStoreHelper.attachZone(any(DataStore.class))).thenReturn(dataStore); + + try (MockedStatic utilityMock = Mockito.mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.getStrategyByStoragePoolDetails(any())) + .thenReturn(storageStrategy); + when(storageStrategy.createAccessGroup(any(AccessGroup.class))).thenReturn(null); + when(_storageMgr.connectHostToSharedPool(any(HostVO.class), anyLong())).thenReturn(true); + + // Execute + boolean result = ontapPrimaryDatastoreLifecycle.attachZone( + dataStore, zoneScope, Hypervisor.HypervisorType.KVM); + + // Verify + assertTrue(result, "attachZone should return true with single host"); + verify(_storageMgr, times(1)).connectHostToSharedPool(any(HostVO.class), eq(1L)); + verify(_dataStoreHelper, times(1)).attachZone(any(DataStore.class)); + } + } + + @Test + public void testAttachZone_withMultipleHosts() throws Exception { + // Setup - add more hosts + HostVO host3 = new HostVO("host3-guid"); + host3.setPrivateIpAddress("192.168.1.12"); + host3.setStorageIpAddress("192.168.1.12"); + host3.setClusterId(1L); + mockHosts.add(host3); + + when(zoneScope.getScopeId()).thenReturn(1L); + when(_resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(any(), eq(1L), eq(Hypervisor.HypervisorType.KVM))) + .thenReturn(mockHosts); + when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(poolDetails); + 
when(_dataStoreHelper.attachZone(any(DataStore.class))).thenReturn(dataStore); + + try (MockedStatic utilityMock = Mockito.mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.getStrategyByStoragePoolDetails(any())) + .thenReturn(storageStrategy); + when(storageStrategy.createAccessGroup(any(AccessGroup.class))).thenReturn(null); + when(_storageMgr.connectHostToSharedPool(any(HostVO.class), anyLong())).thenReturn(true); + + // Execute + boolean result = ontapPrimaryDatastoreLifecycle.attachZone( + dataStore, zoneScope, Hypervisor.HypervisorType.KVM); + + // Verify + assertTrue(result, "attachZone should return true with multiple hosts"); + verify(_storageMgr, times(3)).connectHostToSharedPool(any(HostVO.class), eq(1L)); + verify(_dataStoreHelper, times(1)).attachZone(any(DataStore.class)); + } + } + + @Test + public void testAttachZone_hostConnectionFailure() throws Exception { + // Setup + when(zoneScope.getScopeId()).thenReturn(1L); + when(_resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(any(), eq(1L), eq(Hypervisor.HypervisorType.KVM))) + .thenReturn(mockHosts); + when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(poolDetails); + + try (MockedStatic utilityMock = Mockito.mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.getStrategyByStoragePoolDetails(any())) + .thenReturn(storageStrategy); + when(storageStrategy.createAccessGroup(any(AccessGroup.class))).thenReturn(null); + + // Mock host connection failure for first host + when(_storageMgr.connectHostToSharedPool(any(HostVO.class), anyLong())) + .thenThrow(new CloudRuntimeException("Connection failed")); + + // Execute + boolean result = ontapPrimaryDatastoreLifecycle.attachZone( + dataStore, zoneScope, Hypervisor.HypervisorType.KVM); + + // Verify + assertFalse(result, "attachZone should return false on host connection failure"); + verify(storageStrategy, times(1)).createAccessGroup(any(AccessGroup.class)); + 
verify(_storageMgr, times(1)).connectHostToSharedPool(any(HostVO.class), eq(1L)); + // _dataStoreHelper.attachZone should NOT be called due to early return + verify(_dataStoreHelper, times(0)).attachZone(any(DataStore.class)); + } + } + + @Test + public void testAttachZone_emptyHostList() throws Exception { + // Setup - no hosts in zone + List emptyHosts = new ArrayList<>(); + + when(zoneScope.getScopeId()).thenReturn(1L); + when(_resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(any(), eq(1L), eq(Hypervisor.HypervisorType.KVM))) + .thenReturn(emptyHosts); + when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(poolDetails); + when(_dataStoreHelper.attachZone(any(DataStore.class))).thenReturn(dataStore); + + try (MockedStatic utilityMock = Mockito.mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.getStrategyByStoragePoolDetails(any())) + .thenReturn(storageStrategy); + when(storageStrategy.createAccessGroup(any(AccessGroup.class))).thenReturn(null); + + // Execute + boolean result = ontapPrimaryDatastoreLifecycle.attachZone( + dataStore, zoneScope, Hypervisor.HypervisorType.KVM); + + // Verify + assertTrue(result, "attachZone should return true even with no hosts"); + verify(_storageMgr, times(0)).connectHostToSharedPool(any(HostVO.class), anyLong()); + verify(_dataStoreHelper, times(1)).attachZone(any(DataStore.class)); + } + } + + @Test + public void testAttachZone_secondHostConnectionFails() throws Exception { + // Setup + when(zoneScope.getScopeId()).thenReturn(1L); + when(_resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(any(), eq(1L), eq(Hypervisor.HypervisorType.KVM))) + .thenReturn(mockHosts); + when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(poolDetails); + + try (MockedStatic utilityMock = Mockito.mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.getStrategyByStoragePoolDetails(any())) + .thenReturn(storageStrategy); + 
when(storageStrategy.createAccessGroup(any(AccessGroup.class))).thenReturn(null); + + // Mock: first host succeeds, second host fails + when(_storageMgr.connectHostToSharedPool(any(HostVO.class), anyLong())) + .thenReturn(true) + .thenThrow(new CloudRuntimeException("Connection failed")); + + // Execute + boolean result = ontapPrimaryDatastoreLifecycle.attachZone( + dataStore, zoneScope, Hypervisor.HypervisorType.KVM); + + // Verify + assertFalse(result, "attachZone should return false when any host connection fails"); + verify(_storageMgr, times(2)).connectHostToSharedPool(any(HostVO.class), eq(1L)); + verify(_dataStoreHelper, times(0)).attachZone(any(DataStore.class)); + } + } + + @Test + public void testAttachZone_createAccessGroupCalled() throws Exception { + // Setup + when(zoneScope.getScopeId()).thenReturn(1L); + when(_resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(any(), eq(1L), eq(Hypervisor.HypervisorType.KVM))) + .thenReturn(mockHosts); + when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(poolDetails); + when(_dataStoreHelper.attachZone(any(DataStore.class))).thenReturn(dataStore); + + try (MockedStatic utilityMock = Mockito.mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.getStrategyByStoragePoolDetails(any())) + .thenReturn(storageStrategy); + when(storageStrategy.createAccessGroup(any(AccessGroup.class))).thenReturn(null); + when(_storageMgr.connectHostToSharedPool(any(HostVO.class), anyLong())).thenReturn(true); + + // Execute + boolean result = ontapPrimaryDatastoreLifecycle.attachZone( + dataStore, zoneScope, Hypervisor.HypervisorType.KVM); + + // Verify - createAccessGroup is called with correct AccessGroup structure + assertTrue(result); + verify(storageStrategy, times(1)).createAccessGroup(any(AccessGroup.class)); + } + } + } diff --git a/plugins/storage/volume/ontap/src/test/java/org/apache/cloudstack/storage/service/StorageStrategyTest.java 
b/plugins/storage/volume/ontap/src/test/java/org/apache/cloudstack/storage/service/StorageStrategyTest.java new file mode 100644 index 000000000000..c2a4b56a1fa1 --- /dev/null +++ b/plugins/storage/volume/ontap/src/test/java/org/apache/cloudstack/storage/service/StorageStrategyTest.java @@ -0,0 +1,841 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.storage.service; + +import com.cloud.utils.exception.CloudRuntimeException; +import feign.FeignException; +import org.apache.cloudstack.storage.feign.client.AggregateFeignClient; +import org.apache.cloudstack.storage.feign.client.JobFeignClient; +import org.apache.cloudstack.storage.feign.client.NetworkFeignClient; +import org.apache.cloudstack.storage.feign.client.SANFeignClient; +import org.apache.cloudstack.storage.feign.client.SvmFeignClient; +import org.apache.cloudstack.storage.feign.client.VolumeFeignClient; +import org.apache.cloudstack.storage.feign.model.Aggregate; +import org.apache.cloudstack.storage.feign.model.IpInterface; +import org.apache.cloudstack.storage.feign.model.IscsiService; +import org.apache.cloudstack.storage.feign.model.Job; +import org.apache.cloudstack.storage.feign.model.OntapStorage; +import org.apache.cloudstack.storage.feign.model.Svm; +import org.apache.cloudstack.storage.feign.model.Volume; +import org.apache.cloudstack.storage.feign.model.response.JobResponse; +import org.apache.cloudstack.storage.feign.model.response.OntapResponse; +import org.apache.cloudstack.storage.service.model.AccessGroup; +import org.apache.cloudstack.storage.service.model.CloudStackVolume; +import org.apache.cloudstack.storage.service.model.ProtocolType; +import org.apache.cloudstack.storage.utils.OntapStorageConstants; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; +import org.mockito.junit.jupiter.MockitoSettings; +import org.mockito.quality.Strictness; + +import java.lang.reflect.Field; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import 
static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyMap; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.atLeastOnce; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +@ExtendWith(MockitoExtension.class) +@MockitoSettings(strictness = Strictness.LENIENT) +public class StorageStrategyTest { + + @Mock + private AggregateFeignClient aggregateFeignClient; + + @Mock + private VolumeFeignClient volumeFeignClient; + + @Mock + private SvmFeignClient svmFeignClient; + + @Mock + private JobFeignClient jobFeignClient; + + @Mock + private NetworkFeignClient networkFeignClient; + + @Mock + private SANFeignClient sanFeignClient; + + private TestableStorageStrategy storageStrategy; + + // Concrete implementation for testing abstract class + private static class TestableStorageStrategy extends StorageStrategy { + public TestableStorageStrategy(OntapStorage ontapStorage, + AggregateFeignClient aggregateFeignClient, + VolumeFeignClient volumeFeignClient, + SvmFeignClient svmFeignClient, + JobFeignClient jobFeignClient, + NetworkFeignClient networkFeignClient, + SANFeignClient sanFeignClient) { + super(ontapStorage); + // Use reflection to replace the private Feign client fields with mocked ones + injectMockedClient("aggregateFeignClient", aggregateFeignClient); + injectMockedClient("volumeFeignClient", volumeFeignClient); + injectMockedClient("svmFeignClient", svmFeignClient); + injectMockedClient("jobFeignClient", jobFeignClient); + injectMockedClient("networkFeignClient", networkFeignClient); + injectMockedClient("sanFeignClient", sanFeignClient); + } + + private void injectMockedClient(String fieldName, Object 
mockedClient) { + try { + Field field = StorageStrategy.class.getDeclaredField(fieldName); + field.setAccessible(true); + field.set(this, mockedClient); + } catch (NoSuchFieldException | IllegalAccessException e) { + throw new RuntimeException("Failed to inject mocked client: " + fieldName, e); + } + } + + @Override + public org.apache.cloudstack.storage.service.model.CloudStackVolume createCloudStackVolume( + org.apache.cloudstack.storage.service.model.CloudStackVolume cloudstackVolume) { + return null; + } + + @Override + org.apache.cloudstack.storage.service.model.CloudStackVolume updateCloudStackVolume( + org.apache.cloudstack.storage.service.model.CloudStackVolume cloudstackVolume) { + return null; + } + + @Override + public void deleteCloudStackVolume(org.apache.cloudstack.storage.service.model.CloudStackVolume cloudstackVolume) { + } + + @Override + public void copyCloudStackVolume(org.apache.cloudstack.storage.service.model.CloudStackVolume cloudstackVolume) { + + } + + @Override + public CloudStackVolume getCloudStackVolume( + Map cloudStackVolumeMap) { + return null; + } + + @Override + public JobResponse revertSnapshotForCloudStackVolume(String snapshotName, String flexVolUuid, String snapshotUuid, String volumePath, String lunUuid, String flexVolName) { + return null; + } + + @Override + public AccessGroup createAccessGroup( + org.apache.cloudstack.storage.service.model.AccessGroup accessGroup) { + return null; + } + + @Override + public void deleteAccessGroup(org.apache.cloudstack.storage.service.model.AccessGroup accessGroup) { + } + + @Override + AccessGroup updateAccessGroup( + org.apache.cloudstack.storage.service.model.AccessGroup accessGroup) { + return null; + } + + @Override + public AccessGroup getAccessGroup( + Map values) { + return null; + } + + @Override + public Map enableLogicalAccess(Map values) { + return null; + } + + @Override + public void disableLogicalAccess(Map values) { + } + + @Override + public Map getLogicalAccess(Map values) 
{ + return null; + } + } + + @BeforeEach + void setUp() { + // Create OntapStorage using constructor (immutable object) + OntapStorage ontapStorage = new OntapStorage("admin", "password", "192.168.1.100", + "svm1", 5000000000L, ProtocolType.NFS3); + + // Note: In real implementation, StorageStrategy constructor creates Feign clients + // For testing, we'll need to mock the FeignClientFactory behavior + storageStrategy = new TestableStorageStrategy(ontapStorage, + aggregateFeignClient, volumeFeignClient, svmFeignClient, + jobFeignClient, networkFeignClient, sanFeignClient); + } + + // ========== connect() Tests ========== + + @Test + public void testConnect_positive() { + // Setup + Svm svm = new Svm(); + svm.setName("svm1"); + svm.setState(OntapStorageConstants.RUNNING); + svm.setNfsEnabled(true); + + Aggregate aggregate = new Aggregate(); + aggregate.setName("aggr1"); + aggregate.setUuid("aggr-uuid-1"); + svm.setAggregates(List.of(aggregate)); + + OntapResponse svmResponse = new OntapResponse<>(); + svmResponse.setRecords(List.of(svm)); + + when(svmFeignClient.getSvmResponse(anyMap(), anyString())).thenReturn(svmResponse); + Aggregate aggregateDetail = mock(Aggregate.class); + when(aggregateDetail.getName()).thenReturn("aggr1"); + when(aggregateDetail.getUuid()).thenReturn("aggr-uuid-1"); + when(aggregateDetail.getState()).thenReturn(Aggregate.StateEnum.ONLINE); + when(aggregateDetail.getSpace()).thenReturn(mock(Aggregate.AggregateSpace.class)); + when(aggregateDetail.getAvailableBlockStorageSpace()).thenReturn(10000000000.0); + when(aggregateFeignClient.getAggregateByUUID(anyString(), eq("aggr-uuid-1"))).thenReturn(aggregateDetail); + + // Execute + boolean result = storageStrategy.connect(); + + // Verify + assertTrue(result, "connect() should return true on success"); + verify(svmFeignClient, times(1)).getSvmResponse(anyMap(), anyString()); + } + + @Test + public void testConnect_svmNotFound() { + // Setup + OntapResponse svmResponse = new OntapResponse<>(); + 
svmResponse.setRecords(new ArrayList<>()); + + when(svmFeignClient.getSvmResponse(anyMap(), anyString())).thenReturn(svmResponse); + + // Execute + boolean result = storageStrategy.connect(); + + // Verify + assertFalse(result, "connect() should return false when SVM is not found"); + } + + @Test + public void testConnect_svmNotRunning() { + // Setup + Svm svm = new Svm(); + svm.setName("svm1"); + svm.setState("stopped"); + svm.setNfsEnabled(true); + + OntapResponse svmResponse = new OntapResponse<>(); + svmResponse.setRecords(List.of(svm)); + + when(svmFeignClient.getSvmResponse(anyMap(), anyString())).thenReturn(svmResponse); + + // Execute + boolean result = storageStrategy.connect(); + + // Verify + assertFalse(result, "connect() should return false when SVM is not running"); + } + + @Test + public void testConnect_nfsNotEnabled() { + // Setup + Svm svm = new Svm(); + svm.setName("svm1"); + svm.setState(OntapStorageConstants.RUNNING); + svm.setNfsEnabled(false); + + Aggregate aggregate = new Aggregate(); + aggregate.setName("aggr1"); + aggregate.setUuid("aggr-uuid-1"); + svm.setAggregates(List.of(aggregate)); + + OntapResponse svmResponse = new OntapResponse<>(); + svmResponse.setRecords(List.of(svm)); + + when(svmFeignClient.getSvmResponse(anyMap(), anyString())).thenReturn(svmResponse); + + // Execute & Verify + boolean result = storageStrategy.connect(); + assertFalse(result, "connect() should fail when NFS is disabled"); + } + + @Test + public void testConnect_iscsiNotEnabled() { + // Setup - recreate with iSCSI protocol + OntapStorage iscsiStorage = new OntapStorage("admin", "password", "192.168.1.100", + "svm1", 5000000000L, ProtocolType.ISCSI); + storageStrategy = new TestableStorageStrategy(iscsiStorage, + aggregateFeignClient, volumeFeignClient, svmFeignClient, + jobFeignClient, networkFeignClient, sanFeignClient); + + Svm svm = new Svm(); + svm.setName("svm1"); + svm.setState(OntapStorageConstants.RUNNING); + svm.setIscsiEnabled(false); + + Aggregate 
aggregate = new Aggregate(); + aggregate.setName("aggr1"); + aggregate.setUuid("aggr-uuid-1"); + svm.setAggregates(List.of(aggregate)); + + OntapResponse svmResponse = new OntapResponse<>(); + svmResponse.setRecords(List.of(svm)); + + when(svmFeignClient.getSvmResponse(anyMap(), anyString())).thenReturn(svmResponse); + + // Execute & Verify + boolean result = storageStrategy.connect(); + assertFalse(result, "connect() should fail when iSCSI is disabled"); + } + + @Test + public void testConnect_noAggregates() { + // Setup + Svm svm = new Svm(); + svm.setName("svm1"); + svm.setState(OntapStorageConstants.RUNNING); + svm.setNfsEnabled(true); + svm.setAggregates(new ArrayList<>()); + + OntapResponse svmResponse = new OntapResponse<>(); + svmResponse.setRecords(List.of(svm)); + + when(svmFeignClient.getSvmResponse(anyMap(), anyString())).thenReturn(svmResponse); + + // Execute + boolean result = storageStrategy.connect(); + + // Verify + assertFalse(result, "connect() should return false when no aggregates are assigned"); + } + + @Test + public void testConnect_nullSvmResponse() { + // Setup + when(svmFeignClient.getSvmResponse(anyMap(), anyString())).thenReturn(null); + + // Execute + boolean result = storageStrategy.connect(); + + // Verify + assertFalse(result, "connect() should return false when SVM response is null"); + } + + // ========== createStorageVolume() Tests ========== + + @Test + public void testCreateStorageVolume_positive() { + // Setup - First connect to populate aggregates + setupSuccessfulConnect(); + storageStrategy.connect(); + + // Setup aggregate details + Aggregate aggregateDetail = mock(Aggregate.class); + when(aggregateDetail.getName()).thenReturn("aggr1"); + when(aggregateDetail.getUuid()).thenReturn("aggr-uuid-1"); + when(aggregateDetail.getState()).thenReturn(Aggregate.StateEnum.ONLINE); + when(aggregateDetail.getSpace()).thenReturn(mock(Aggregate.AggregateSpace.class)); // Mock non-null space + 
when(aggregateDetail.getAvailableBlockStorageSpace()).thenReturn(10000000000.0); + + when(aggregateFeignClient.getAggregateByUUID(anyString(), eq("aggr-uuid-1"))) + .thenReturn(aggregateDetail); + + // Setup job response + Job job = new Job(); + job.setUuid("job-uuid-1"); + JobResponse jobResponse = new JobResponse(); + jobResponse.setJob(job); + + when(volumeFeignClient.createVolumeWithJob(anyString(), any(Volume.class))) + .thenReturn(jobResponse); + + // Setup job polling + Job completedJob = new Job(); + completedJob.setUuid("job-uuid-1"); + completedJob.setState(OntapStorageConstants.JOB_SUCCESS); + when(jobFeignClient.getJobByUUID(anyString(), eq("job-uuid-1"))) + .thenReturn(completedJob); + + // Setup volume retrieval after creation + Volume createdVolume = new Volume(); + createdVolume.setName("test-volume"); + createdVolume.setUuid("vol-uuid-1"); + OntapResponse volumeResponse = new OntapResponse<>(); + volumeResponse.setRecords(List.of(createdVolume)); + + when(volumeFeignClient.getAllVolumes(anyString(), anyMap())) + .thenReturn(volumeResponse); + when(volumeFeignClient.getVolume(anyString(), anyMap())) + .thenReturn(volumeResponse); + + // Execute + Volume result = storageStrategy.createStorageVolume("test-volume", 5000000000L); + + // Verify + assertNotNull(result); + assertEquals("test-volume", result.getName()); + assertEquals("vol-uuid-1", result.getUuid()); + verify(volumeFeignClient, times(1)).createVolumeWithJob(anyString(), any(Volume.class)); + verify(jobFeignClient, atLeastOnce()).getJobByUUID(anyString(), eq("job-uuid-1")); + } + + @Test + public void testCreateStorageVolume_invalidSize() { + // Setup + setupSuccessfulConnect(); + storageStrategy.connect(); + + // Execute & Verify + Exception ex = assertThrows(CloudRuntimeException.class, + () -> storageStrategy.createStorageVolume("test-volume", -1L)); + assertTrue(ex.getMessage().contains("Invalid volume size")); + } + + @Test + public void testCreateStorageVolume_nullSize() { + // Setup + 
setupSuccessfulConnect(); + storageStrategy.connect(); + + // Execute & Verify + Exception ex = assertThrows(CloudRuntimeException.class, + () -> storageStrategy.createStorageVolume("test-volume", null)); + assertTrue(ex.getMessage().contains("Invalid volume size")); + } + + @Test + public void testCreateStorageVolume_noAggregates() { + // Execute & Verify - without calling connect first + Exception ex = assertThrows(CloudRuntimeException.class, + () -> storageStrategy.createStorageVolume("test-volume", 5000000000L)); + assertTrue(ex.getMessage().contains("No aggregates available")); + } + + @Test + public void testCreateStorageVolume_aggregateNotOnline() { + // Setup + setupSuccessfulConnect(); + storageStrategy.connect(); + + Aggregate aggregateDetail = mock(Aggregate.class); + when(aggregateDetail.getName()).thenReturn("aggr1"); + when(aggregateDetail.getUuid()).thenReturn("aggr-uuid-1"); + when(aggregateDetail.getState()).thenReturn(null); // null state to simulate offline + + when(aggregateFeignClient.getAggregateByUUID(anyString(), eq("aggr-uuid-1"))) + .thenReturn(aggregateDetail); + + // Execute & Verify + Exception ex = assertThrows(CloudRuntimeException.class, + () -> storageStrategy.createStorageVolume("test-volume", 5000000000L)); + assertTrue(ex.getMessage().contains("No suitable aggregates found")); + } + + @Test + public void testCreateStorageVolume_insufficientSpace() { + // Setup + setupSuccessfulConnect(); + storageStrategy.connect(); + + Aggregate aggregateDetail = mock(Aggregate.class); + when(aggregateDetail.getName()).thenReturn("aggr1"); + when(aggregateDetail.getUuid()).thenReturn("aggr-uuid-1"); + when(aggregateDetail.getState()).thenReturn(Aggregate.StateEnum.ONLINE); + when(aggregateDetail.getAvailableBlockStorageSpace()).thenReturn(1000000.0); // Only 1MB available + + when(aggregateFeignClient.getAggregateByUUID(anyString(), eq("aggr-uuid-1"))) + .thenReturn(aggregateDetail); + + // Execute & Verify + Exception ex = 
assertThrows(CloudRuntimeException.class, + () -> storageStrategy.createStorageVolume("test-volume", 5000000000L)); // Request 5GB + assertTrue(ex.getMessage().contains("No suitable aggregates found")); + } + + @Test + public void testCreateStorageVolume_jobFailed() { + // Setup + setupSuccessfulConnect(); + storageStrategy.connect(); + + setupAggregateForVolumeCreation(); + + Job job = new Job(); + job.setUuid("job-uuid-1"); + JobResponse jobResponse = new JobResponse(); + jobResponse.setJob(job); + + when(volumeFeignClient.createVolumeWithJob(anyString(), any(Volume.class))) + .thenReturn(jobResponse); + + // Setup failed job + Job failedJob = new Job(); + failedJob.setUuid("job-uuid-1"); + failedJob.setState(OntapStorageConstants.JOB_FAILURE); + failedJob.setMessage("Volume creation failed"); + when(jobFeignClient.getJobByUUID(anyString(), eq("job-uuid-1"))) + .thenReturn(failedJob); + + // Execute & Verify + Exception ex = assertThrows(CloudRuntimeException.class, + () -> storageStrategy.createStorageVolume("test-volume", 5000000000L)); + assertTrue(ex.getMessage().contains("failed") || ex.getMessage().contains("Job failed")); + } + + @Test + public void testCreateStorageVolume_volumeNotFoundAfterCreation() { + // Setup + setupSuccessfulConnect(); + storageStrategy.connect(); + setupAggregateForVolumeCreation(); + setupSuccessfulJobCreation(); + + // Setup empty volume response + OntapResponse emptyResponse = new OntapResponse<>(); + emptyResponse.setRecords(new ArrayList<>()); + + when(volumeFeignClient.getAllVolumes(anyString(), anyMap())) + .thenReturn(emptyResponse); + + // Execute & Verify + Exception ex = assertThrows(CloudRuntimeException.class, + () -> storageStrategy.createStorageVolume("test-volume", 5000000000L)); + assertTrue(ex.getMessage() != null && ex.getMessage().contains("not found after creation")); + } + + // ========== deleteStorageVolume() Tests ========== + + @Test + public void testDeleteStorageVolume_positive() { + // Setup + Volume 
volume = new Volume(); + volume.setName("test-volume"); + volume.setUuid("vol-uuid-1"); + + Job job = new Job(); + job.setUuid("job-uuid-1"); + JobResponse jobResponse = new JobResponse(); + jobResponse.setJob(job); + + when(volumeFeignClient.deleteVolume(anyString(), eq("vol-uuid-1"))) + .thenReturn(jobResponse); + + Job completedJob = new Job(); + completedJob.setUuid("job-uuid-1"); + completedJob.setState(OntapStorageConstants.JOB_SUCCESS); + when(jobFeignClient.getJobByUUID(anyString(), eq("job-uuid-1"))) + .thenReturn(completedJob); + + // Execute + storageStrategy.deleteStorageVolume(volume); + + // Verify + verify(volumeFeignClient, times(1)).deleteVolume(anyString(), eq("vol-uuid-1")); + verify(jobFeignClient, atLeastOnce()).getJobByUUID(anyString(), eq("job-uuid-1")); + } + + @Test + public void testDeleteStorageVolume_jobFailed() { + // Setup + Volume volume = new Volume(); + volume.setName("test-volume"); + volume.setUuid("vol-uuid-1"); + + Job job = new Job(); + job.setUuid("job-uuid-1"); + JobResponse jobResponse = new JobResponse(); + jobResponse.setJob(job); + + when(volumeFeignClient.deleteVolume(anyString(), eq("vol-uuid-1"))) + .thenReturn(jobResponse); + + Job failedJob = new Job(); + failedJob.setUuid("job-uuid-1"); + failedJob.setState(OntapStorageConstants.JOB_FAILURE); + failedJob.setMessage("Deletion failed"); + when(jobFeignClient.getJobByUUID(anyString(), eq("job-uuid-1"))) + .thenReturn(failedJob); + + // Execute & Verify + Exception ex = assertThrows(CloudRuntimeException.class, + () -> storageStrategy.deleteStorageVolume(volume)); + assertTrue(ex.getMessage().contains("Job failed")); + } + + @Test + public void testDeleteStorageVolume_feignException() { + // Setup + Volume volume = new Volume(); + volume.setName("test-volume"); + volume.setUuid("vol-uuid-1"); + + when(volumeFeignClient.deleteVolume(anyString(), eq("vol-uuid-1"))) + .thenThrow(mock(FeignException.FeignClientException.class)); + + // Execute & Verify + Exception ex = 
assertThrows(CloudRuntimeException.class, + () -> storageStrategy.deleteStorageVolume(volume)); + assertTrue(ex.getMessage().contains("Failed to delete volume")); + } + + // ========== getStoragePath() Tests ========== + + @Test + public void testGetStoragePath_iscsi() { + // Setup - recreate with iSCSI protocol + OntapStorage iscsiStorage = new OntapStorage("admin", "password", "192.168.1.100", + "svm1", null, ProtocolType.ISCSI); + storageStrategy = new TestableStorageStrategy(iscsiStorage, + aggregateFeignClient, volumeFeignClient, svmFeignClient, + jobFeignClient, networkFeignClient, sanFeignClient); + + IscsiService.IscsiServiceTarget target = new IscsiService.IscsiServiceTarget(); + target.setName("iqn.1992-08.com.netapp:sn.123456:vs.1"); + + IscsiService iscsiService = new IscsiService(); + iscsiService.setTarget(target); + + OntapResponse iscsiResponse = new OntapResponse<>(); + iscsiResponse.setRecords(List.of(iscsiService)); + + when(sanFeignClient.getIscsiServices(anyString(), anyMap())) + .thenReturn(iscsiResponse); + + // Execute + String result = storageStrategy.getStoragePath(); + + // Verify + assertNotNull(result); + assertEquals("iqn.1992-08.com.netapp:sn.123456:vs.1", result); + verify(sanFeignClient, times(1)).getIscsiServices(anyString(), anyMap()); + } + + @Test + public void testGetStoragePath_iscsi_noService() { + // Setup - recreate with iSCSI protocol + OntapStorage iscsiStorage = new OntapStorage("admin", "password", "192.168.1.100", + "svm1", null, ProtocolType.ISCSI); + storageStrategy = new TestableStorageStrategy(iscsiStorage, + aggregateFeignClient, volumeFeignClient, svmFeignClient, + jobFeignClient, networkFeignClient, sanFeignClient); + + OntapResponse emptyResponse = new OntapResponse<>(); + emptyResponse.setRecords(new ArrayList<>()); + + when(sanFeignClient.getIscsiServices(anyString(), anyMap())) + .thenReturn(emptyResponse); + + // Execute & Verify + Exception ex = assertThrows(CloudRuntimeException.class, + () -> 
storageStrategy.getStoragePath()); + assertTrue(ex.getMessage().contains("No iSCSI service found")); + } + + @Test + public void testGetStoragePath_iscsi_noTargetIqn() { + // Setup - recreate with iSCSI protocol + OntapStorage iscsiStorage = new OntapStorage("admin", "password", "192.168.1.100", + "svm1", null, ProtocolType.ISCSI); + storageStrategy = new TestableStorageStrategy(iscsiStorage, + aggregateFeignClient, volumeFeignClient, svmFeignClient, + jobFeignClient, networkFeignClient, sanFeignClient); + + IscsiService iscsiService = new IscsiService(); + iscsiService.setTarget(null); + + OntapResponse iscsiResponse = new OntapResponse<>(); + iscsiResponse.setRecords(List.of(iscsiService)); + + when(sanFeignClient.getIscsiServices(anyString(), anyMap())) + .thenReturn(iscsiResponse); + + // Execute & Verify + Exception ex = assertThrows(CloudRuntimeException.class, + () -> storageStrategy.getStoragePath()); + assertTrue(ex.getMessage().contains("iSCSI target IQN not found")); + } + + // ========== getNetworkInterface() Tests ========== + + @Test + public void testGetNetworkInterface_nfs() { + // Setup + IpInterface.IpInfo ipInfo = new IpInterface.IpInfo(); + ipInfo.setAddress("192.168.1.50"); + + IpInterface ipInterface = new IpInterface(); + ipInterface.setIp(ipInfo); + + OntapResponse interfaceResponse = new OntapResponse<>(); + interfaceResponse.setRecords(List.of(ipInterface)); + + when(networkFeignClient.getNetworkIpInterfaces(anyString(), anyMap())) + .thenReturn(interfaceResponse); + + // Execute + String result = storageStrategy.getNetworkInterface(); + + // Verify + assertNotNull(result); + assertEquals("192.168.1.50", result); + verify(networkFeignClient, times(1)).getNetworkIpInterfaces(anyString(), anyMap()); + } + + @Test + public void testGetNetworkInterface_iscsi() { + // Setup - recreate with iSCSI protocol + OntapStorage iscsiStorage = new OntapStorage("admin", "password", "192.168.1.100", + "svm1", null, ProtocolType.ISCSI); + storageStrategy = 
new TestableStorageStrategy(iscsiStorage, + aggregateFeignClient, volumeFeignClient, svmFeignClient, + jobFeignClient, networkFeignClient, sanFeignClient); + + IpInterface.IpInfo ipInfo = new IpInterface.IpInfo(); + ipInfo.setAddress("192.168.1.51"); + + IpInterface ipInterface = new IpInterface(); + ipInterface.setIp(ipInfo); + + OntapResponse interfaceResponse = new OntapResponse<>(); + interfaceResponse.setRecords(List.of(ipInterface)); + + when(networkFeignClient.getNetworkIpInterfaces(anyString(), anyMap())) + .thenReturn(interfaceResponse); + + // Execute + String result = storageStrategy.getNetworkInterface(); + + // Verify + assertNotNull(result); + assertEquals("192.168.1.51", result); + } + + @Test + public void testGetNetworkInterface_noInterfaces() { + // Setup + OntapResponse emptyResponse = new OntapResponse<>(); + emptyResponse.setRecords(new ArrayList<>()); + + when(networkFeignClient.getNetworkIpInterfaces(anyString(), anyMap())) + .thenReturn(emptyResponse); + + // Execute & Verify + Exception ex = assertThrows(CloudRuntimeException.class, + () -> storageStrategy.getNetworkInterface()); + assertTrue(ex.getMessage().contains("No network interfaces found")); + } + + @Test + public void testGetNetworkInterface_feignException() { + // Setup + when(networkFeignClient.getNetworkIpInterfaces(anyString(), anyMap())) + .thenThrow(mock(FeignException.FeignClientException.class)); + + // Execute & Verify + Exception ex = assertThrows(CloudRuntimeException.class, + () -> storageStrategy.getNetworkInterface()); + assertTrue(ex.getMessage().contains("Failed to retrieve network interfaces")); + } + + // ========== Helper Methods ========== + + private void setupSuccessfulConnect() { + Svm svm = new Svm(); + svm.setName("svm1"); + svm.setState(OntapStorageConstants.RUNNING); + svm.setNfsEnabled(true); + + Aggregate aggregate = new Aggregate(); + aggregate.setName("aggr1"); + aggregate.setUuid("aggr-uuid-1"); + svm.setAggregates(List.of(aggregate)); + + 
OntapResponse svmResponse = new OntapResponse<>(); + svmResponse.setRecords(List.of(svm)); + + when(svmFeignClient.getSvmResponse(anyMap(), anyString())).thenReturn(svmResponse); + + Aggregate aggregateDetail = mock(Aggregate.class); + when(aggregateDetail.getName()).thenReturn("aggr1"); + when(aggregateDetail.getUuid()).thenReturn("aggr-uuid-1"); + when(aggregateDetail.getState()).thenReturn(Aggregate.StateEnum.ONLINE); + when(aggregateDetail.getSpace()).thenReturn(mock(Aggregate.AggregateSpace.class)); + when(aggregateDetail.getAvailableBlockStorageSpace()).thenReturn(10000000000.0); + when(aggregateFeignClient.getAggregateByUUID(anyString(), eq("aggr-uuid-1"))).thenReturn(aggregateDetail); + } + + private void setupAggregateForVolumeCreation() { + Aggregate aggregateDetail = mock(Aggregate.class); + when(aggregateDetail.getName()).thenReturn("aggr1"); + when(aggregateDetail.getUuid()).thenReturn("aggr-uuid-1"); + when(aggregateDetail.getState()).thenReturn(Aggregate.StateEnum.ONLINE); + when(aggregateDetail.getSpace()).thenReturn(mock(Aggregate.AggregateSpace.class)); // Mock non-null space + when(aggregateDetail.getAvailableBlockStorageSpace()).thenReturn(10000000000.0); + + when(aggregateFeignClient.getAggregateByUUID(anyString(), eq("aggr-uuid-1"))) + .thenReturn(aggregateDetail); + } + + private void setupSuccessfulJobCreation() { + Job job = new Job(); + job.setUuid("job-uuid-1"); + JobResponse jobResponse = new JobResponse(); + jobResponse.setJob(job); + + when(volumeFeignClient.createVolumeWithJob(anyString(), any(Volume.class))) + .thenReturn(jobResponse); + + Job completedJob = new Job(); + completedJob.setUuid("job-uuid-1"); + completedJob.setState(OntapStorageConstants.JOB_SUCCESS); + when(jobFeignClient.getJobByUUID(anyString(), eq("job-uuid-1"))) + .thenReturn(completedJob); + + Volume createdVolume = new Volume(); + createdVolume.setName("test-volume"); + createdVolume.setUuid("vol-uuid-1"); + OntapResponse volumeResponse = new OntapResponse<>(); + 
volumeResponse.setRecords(List.of(createdVolume)); + + when(volumeFeignClient.getAllVolumes(anyString(), anyMap())) + .thenReturn(volumeResponse); + when(volumeFeignClient.getVolume(anyString(), anyMap())) + .thenReturn(volumeResponse); + } +} diff --git a/plugins/storage/volume/ontap/src/test/java/org/apache/cloudstack/storage/service/UnifiedNASStrategyTest.java b/plugins/storage/volume/ontap/src/test/java/org/apache/cloudstack/storage/service/UnifiedNASStrategyTest.java new file mode 100755 index 000000000000..c4d5ddf6878c --- /dev/null +++ b/plugins/storage/volume/ontap/src/test/java/org/apache/cloudstack/storage/service/UnifiedNASStrategyTest.java @@ -0,0 +1,585 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.cloudstack.storage.service; + +import com.cloud.agent.api.Answer; +import com.cloud.host.HostVO; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.utils.exception.CloudRuntimeException; +import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; +import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.storage.command.CreateObjectCommand; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; +import org.apache.cloudstack.storage.feign.client.JobFeignClient; +import org.apache.cloudstack.storage.feign.client.NASFeignClient; +import org.apache.cloudstack.storage.feign.client.VolumeFeignClient; +import org.apache.cloudstack.storage.feign.client.AggregateFeignClient; +import org.apache.cloudstack.storage.feign.client.SvmFeignClient; +import org.apache.cloudstack.storage.feign.client.NetworkFeignClient; +import org.apache.cloudstack.storage.feign.client.SANFeignClient; +import org.apache.cloudstack.storage.feign.model.ExportPolicy; +import org.apache.cloudstack.storage.feign.model.Job; +import org.apache.cloudstack.storage.feign.model.OntapStorage; +import org.apache.cloudstack.storage.feign.model.response.JobResponse; +import org.apache.cloudstack.storage.feign.model.response.OntapResponse; +import org.apache.cloudstack.storage.service.model.AccessGroup; +import org.apache.cloudstack.storage.service.model.CloudStackVolume; +import org.apache.cloudstack.storage.service.model.ProtocolType; +import org.apache.cloudstack.storage.utils.OntapStorageConstants; +import org.apache.cloudstack.storage.volume.VolumeObject; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.ArgumentCaptor; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; 
+import org.mockito.junit.jupiter.MockitoSettings; +import org.mockito.quality.Strictness; + +import java.lang.reflect.Field; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anyMap; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +@ExtendWith(MockitoExtension.class) +@MockitoSettings(strictness = Strictness.LENIENT) +public class UnifiedNASStrategyTest { + + @Mock + private NASFeignClient nasFeignClient; + + @Mock + private VolumeFeignClient volumeFeignClient; + + @Mock + private JobFeignClient jobFeignClient; + + @Mock + private AggregateFeignClient aggregateFeignClient; + + @Mock + private SvmFeignClient svmFeignClient; + + @Mock + private NetworkFeignClient networkFeignClient; + + @Mock + private SANFeignClient sanFeignClient; + + @Mock + private VolumeDao volumeDao; + + @Mock + private EndPointSelector epSelector; + + @Mock + private StoragePoolDetailsDao storagePoolDetailsDao; + + private TestableUnifiedNASStrategy strategy; + + private OntapStorage ontapStorage; + + @BeforeEach + public void setUp() throws Exception { + ontapStorage = new OntapStorage( + "admin", + "password", + "192.168.1.100", + "svm1", + 100L, + ProtocolType.NFS3 + ); + strategy = new TestableUnifiedNASStrategy(ontapStorage, nasFeignClient, volumeFeignClient, jobFeignClient, aggregateFeignClient, 
svmFeignClient, networkFeignClient, sanFeignClient); + injectField("volumeDao", volumeDao); + injectField("epSelector", epSelector); + injectField("storagePoolDetailsDao", storagePoolDetailsDao); + } + + private void injectField(String fieldName, Object mockedField) throws Exception { + Field field = UnifiedNASStrategy.class.getDeclaredField(fieldName); + field.setAccessible(true); + field.set(strategy, mockedField); + } + + private class TestableUnifiedNASStrategy extends UnifiedNASStrategy { + public TestableUnifiedNASStrategy(OntapStorage ontapStorage, + NASFeignClient nasFeignClient, + VolumeFeignClient volumeFeignClient, + JobFeignClient jobFeignClient, + AggregateFeignClient aggregateFeignClient, + SvmFeignClient svmFeignClient, + NetworkFeignClient networkFeignClient, + SANFeignClient sanFeignClient) { + super(ontapStorage); + // All Feign clients are in StorageStrategy parent class + injectParentMockedClient("nasFeignClient", nasFeignClient); + injectParentMockedClient("volumeFeignClient", volumeFeignClient); + injectParentMockedClient("jobFeignClient", jobFeignClient); + injectParentMockedClient("aggregateFeignClient", aggregateFeignClient); + injectParentMockedClient("svmFeignClient", svmFeignClient); + injectParentMockedClient("networkFeignClient", networkFeignClient); + injectParentMockedClient("sanFeignClient", sanFeignClient); + } + + private void injectParentMockedClient(String fieldName, Object mockedClient) { + try { + Field field = StorageStrategy.class.getDeclaredField(fieldName); + field.setAccessible(true); + field.set(this, mockedClient); + } catch (NoSuchFieldException | IllegalAccessException e) { + throw new RuntimeException("Failed to inject parent mocked client: " + fieldName, e); + } + } + } + + // Test createCloudStackVolume - Success + @Test + public void testCreateCloudStackVolume_Success() throws Exception { + // Setup CloudStackVolume + CloudStackVolume cloudStackVolume = mock(CloudStackVolume.class); + VolumeObject volumeObject = 
mock(VolumeObject.class); + VolumeVO volumeVO = mock(VolumeVO.class); + EndPoint endPoint = mock(EndPoint.class); + Answer answer = new Answer(null, true, "Success"); + + when(cloudStackVolume.getDatastoreId()).thenReturn("1"); + when(cloudStackVolume.getVolumeInfo()).thenReturn(volumeObject); + when(volumeObject.getId()).thenReturn(100L); + when(volumeObject.getUuid()).thenReturn("volume-uuid-123"); + when(volumeDao.findById(100L)).thenReturn(volumeVO); + when(volumeDao.update(anyLong(), any(VolumeVO.class))).thenReturn(true); + when(epSelector.select(volumeObject)).thenReturn(endPoint); + when(endPoint.sendMessage(any(CreateObjectCommand.class))).thenReturn(answer); + + // Execute + CloudStackVolume result = strategy.createCloudStackVolume(cloudStackVolume); + + // Verify + assertNotNull(result); + verify(volumeDao).update(anyLong(), any(VolumeVO.class)); + verify(epSelector).select(volumeObject); + verify(endPoint).sendMessage(any(CreateObjectCommand.class)); + } + + // Test createCloudStackVolume - Volume Not Found + @Test + public void testCreateCloudStackVolume_VolumeNotFound() { + CloudStackVolume cloudStackVolume = mock(CloudStackVolume.class); + VolumeObject volumeObject = mock(VolumeObject.class); + + when(cloudStackVolume.getDatastoreId()).thenReturn("1"); + when(cloudStackVolume.getVolumeInfo()).thenReturn(volumeObject); + when(volumeObject.getId()).thenReturn(100L); + when(volumeDao.findById(100L)).thenReturn(null); + + assertThrows(CloudRuntimeException.class, () -> { + strategy.createCloudStackVolume(cloudStackVolume); + }); + } + + // Test createCloudStackVolume - KVM Host Creation Failed + @Test + public void testCreateCloudStackVolume_KVMHostFailed() { + CloudStackVolume cloudStackVolume = mock(CloudStackVolume.class); + VolumeObject volumeObject = mock(VolumeObject.class); + VolumeVO volumeVO = mock(VolumeVO.class); + EndPoint endPoint = mock(EndPoint.class); + Answer answer = new Answer(null, false, "Failed to create volume"); + + 
when(cloudStackVolume.getDatastoreId()).thenReturn("1"); + when(cloudStackVolume.getVolumeInfo()).thenReturn(volumeObject); + when(volumeObject.getId()).thenReturn(100L); + when(volumeObject.getUuid()).thenReturn("volume-uuid-123"); + when(volumeDao.findById(100L)).thenReturn(volumeVO); + when(volumeDao.update(anyLong(), any(VolumeVO.class))).thenReturn(true); + when(epSelector.select(volumeObject)).thenReturn(endPoint); + when(endPoint.sendMessage(any(CreateObjectCommand.class))).thenReturn(answer); + + assertThrows(CloudRuntimeException.class, () -> { + strategy.createCloudStackVolume(cloudStackVolume); + }); + } + + // Test createCloudStackVolume - No Endpoint + @Test + public void testCreateCloudStackVolume_NoEndpoint() { + CloudStackVolume cloudStackVolume = mock(CloudStackVolume.class); + VolumeObject volumeObject = mock(VolumeObject.class); + VolumeVO volumeVO = mock(VolumeVO.class); + + when(cloudStackVolume.getDatastoreId()).thenReturn("1"); + when(cloudStackVolume.getVolumeInfo()).thenReturn(volumeObject); + when(volumeObject.getId()).thenReturn(100L); + when(volumeObject.getUuid()).thenReturn("volume-uuid-123"); + when(volumeDao.findById(100L)).thenReturn(volumeVO); + when(volumeDao.update(anyLong(), any(VolumeVO.class))).thenReturn(true); + when(epSelector.select(volumeObject)).thenReturn(null); + + assertThrows(CloudRuntimeException.class, () -> { + strategy.createCloudStackVolume(cloudStackVolume); + }); + } + + // Test createAccessGroup - Success + @Test + public void testCreateAccessGroup_Success() throws Exception { + // Setup + AccessGroup accessGroup = mock(AccessGroup.class); + Map details = new HashMap<>(); + details.put(OntapStorageConstants.SVM_NAME, "svm1"); + details.put(OntapStorageConstants.VOLUME_UUID, "vol-uuid-123"); + details.put(OntapStorageConstants.VOLUME_NAME, "vol1"); + + List hosts = new ArrayList<>(); + HostVO host1 = mock(HostVO.class); + when(host1.getStorageIpAddress()).thenReturn("10.0.0.1"); + hosts.add(host1); + + 
ExportPolicy createdPolicy = mock(ExportPolicy.class); + when(createdPolicy.getId()).thenReturn(java.math.BigInteger.ONE); + when(createdPolicy.getName()).thenReturn("export-policy-1"); + + OntapResponse policyResponse = new OntapResponse<>(); + List policies = new ArrayList<>(); + policies.add(createdPolicy); + policyResponse.setRecords(policies); + + JobResponse jobResponse = new JobResponse(); + Job job = new Job(); + job.setUuid("job-uuid-123"); + job.setState(OntapStorageConstants.JOB_SUCCESS); + jobResponse.setJob(job); + + // Removed primaryDataStoreInfo mock - using storage pool ID directly + when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(details); + when(accessGroup.getStoragePoolId()).thenReturn(1L); + when(accessGroup.getHostsToConnect()).thenReturn(hosts); + doNothing().when(nasFeignClient).createExportPolicy(anyString(), any(ExportPolicy.class)); + when(nasFeignClient.getExportPolicyResponse(anyString(), anyMap())).thenReturn(policyResponse); + when(volumeFeignClient.updateVolumeRebalancing(anyString(), anyString(), any())).thenReturn(jobResponse); + when(jobFeignClient.getJobByUUID(anyString(), anyString())).thenReturn(job); + doNothing().when(storagePoolDetailsDao).addDetail(anyLong(), anyString(), anyString(), eq(true)); + + // Execute + AccessGroup result = strategy.createAccessGroup(accessGroup); + + // Verify + assertNotNull(result); + verify(nasFeignClient).createExportPolicy(anyString(), any(ExportPolicy.class)); + verify(nasFeignClient).getExportPolicyResponse(anyString(), anyMap()); + verify(volumeFeignClient).updateVolumeRebalancing(anyString(), eq("vol-uuid-123"), any()); + verify(storagePoolDetailsDao, times(2)).addDetail(anyLong(), anyString(), anyString(), eq(true)); + } + + // Test createAccessGroup - Failed to Create Policy + @Test + public void testCreateAccessGroup_FailedToCreatePolicy() { + AccessGroup accessGroup = mock(AccessGroup.class); + Map details = new HashMap<>(); + 
details.put(OntapStorageConstants.SVM_NAME, "svm1"); + details.put(OntapStorageConstants.VOLUME_UUID, "vol-uuid-123"); + details.put(OntapStorageConstants.VOLUME_NAME, "vol1"); + + List hosts = new ArrayList<>(); + HostVO host1 = mock(HostVO.class); + when(host1.getStorageIpAddress()).thenReturn("10.0.0.1"); + hosts.add(host1); + + // Removed primaryDataStoreInfo mock - using storage pool ID directly + when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(details); + when(accessGroup.getHostsToConnect()).thenReturn(hosts); + doThrow(new RuntimeException("Failed to create policy")).when(nasFeignClient) + .createExportPolicy(anyString(), any(ExportPolicy.class)); + + assertThrows(CloudRuntimeException.class, () -> { + strategy.createAccessGroup(accessGroup); + }); + } + + // Test createAccessGroup - Failed to Verify Policy + @Test + public void testCreateAccessGroup_FailedToVerifyPolicy() { + AccessGroup accessGroup = mock(AccessGroup.class); + Map details = new HashMap<>(); + details.put(OntapStorageConstants.SVM_NAME, "svm1"); + details.put(OntapStorageConstants.VOLUME_UUID, "vol-uuid-123"); + details.put(OntapStorageConstants.VOLUME_NAME, "vol1"); + + List hosts = new ArrayList<>(); + HostVO host1 = mock(HostVO.class); + when(host1.getStorageIpAddress()).thenReturn("10.0.0.1"); + hosts.add(host1); + + OntapResponse emptyResponse = new OntapResponse<>(); + emptyResponse.setRecords(new ArrayList<>()); + + // Removed primaryDataStoreInfo mock - using storage pool ID directly + when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(details); + when(accessGroup.getHostsToConnect()).thenReturn(hosts); + doNothing().when(nasFeignClient).createExportPolicy(anyString(), any(ExportPolicy.class)); + when(nasFeignClient.getExportPolicyResponse(anyString(), anyMap())).thenReturn(emptyResponse); + + assertThrows(CloudRuntimeException.class, () -> { + strategy.createAccessGroup(accessGroup); + }); + } + + // Test createAccessGroup - Job Timeout + // Note: This 
test is simplified to avoid 200 second wait time. + // In reality, testing timeout would require mocking Thread.sleep() or refactoring the code. + @Test + public void testCreateAccessGroup_JobFailure() throws Exception { + AccessGroup accessGroup = mock(AccessGroup.class); + Map details = new HashMap<>(); + details.put(OntapStorageConstants.SVM_NAME, "svm1"); + details.put(OntapStorageConstants.VOLUME_UUID, "vol-uuid-123"); + details.put(OntapStorageConstants.VOLUME_NAME, "vol1"); + + List hosts = new ArrayList<>(); + HostVO host1 = mock(HostVO.class); + when(host1.getStorageIpAddress()).thenReturn("10.0.0.1"); + hosts.add(host1); + + ExportPolicy createdPolicy = mock(ExportPolicy.class); + when(createdPolicy.getId()).thenReturn(java.math.BigInteger.ONE); + when(createdPolicy.getName()).thenReturn("export-policy-1"); + + OntapResponse policyResponse = new OntapResponse<>(); + List policies = new ArrayList<>(); + policies.add(createdPolicy); + policyResponse.setRecords(policies); + + JobResponse jobResponse = new JobResponse(); + Job job = new Job(); + job.setUuid("job-uuid-123"); + job.setState(OntapStorageConstants.JOB_FAILURE); // Set to FAILURE instead of timeout + job.setMessage("Job failed"); + jobResponse.setJob(job); + + // Removed primaryDataStoreInfo mock - using storage pool ID directly + when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(details); + when(accessGroup.getStoragePoolId()).thenReturn(1L); + when(accessGroup.getHostsToConnect()).thenReturn(hosts); + doNothing().when(nasFeignClient).createExportPolicy(anyString(), any(ExportPolicy.class)); + when(nasFeignClient.getExportPolicyResponse(anyString(), anyMap())).thenReturn(policyResponse); + when(volumeFeignClient.updateVolumeRebalancing(anyString(), anyString(), any())).thenReturn(jobResponse); + when(jobFeignClient.getJobByUUID(anyString(), anyString())).thenReturn(job); + + assertThrows(CloudRuntimeException.class, () -> { + strategy.createAccessGroup(accessGroup); + }); + } + + // 
Test createAccessGroup - Host with Private IP + @Test + public void testCreateAccessGroup_HostWithPrivateIP() throws Exception { + AccessGroup accessGroup = mock(AccessGroup.class); + Map details = new HashMap<>(); + details.put(OntapStorageConstants.SVM_NAME, "svm1"); + details.put(OntapStorageConstants.VOLUME_UUID, "vol-uuid-123"); + details.put(OntapStorageConstants.VOLUME_NAME, "vol1"); + + List hosts = new ArrayList<>(); + HostVO host1 = mock(HostVO.class); + when(host1.getStorageIpAddress()).thenReturn(null); + when(host1.getPrivateIpAddress()).thenReturn("192.168.1.10"); + hosts.add(host1); + + ExportPolicy createdPolicy = mock(ExportPolicy.class); + when(createdPolicy.getId()).thenReturn(java.math.BigInteger.ONE); + when(createdPolicy.getName()).thenReturn("export-policy-1"); + + OntapResponse policyResponse = new OntapResponse<>(); + List policies = new ArrayList<>(); + policies.add(createdPolicy); + policyResponse.setRecords(policies); + + JobResponse jobResponse = new JobResponse(); + Job job = new Job(); + job.setUuid("job-uuid-123"); + job.setState(OntapStorageConstants.JOB_SUCCESS); + jobResponse.setJob(job); + + // Removed primaryDataStoreInfo mock - using storage pool ID directly + when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(details); + when(accessGroup.getStoragePoolId()).thenReturn(1L); + when(accessGroup.getHostsToConnect()).thenReturn(hosts); + doNothing().when(nasFeignClient).createExportPolicy(anyString(), any(ExportPolicy.class)); + when(nasFeignClient.getExportPolicyResponse(anyString(), anyMap())).thenReturn(policyResponse); + when(volumeFeignClient.updateVolumeRebalancing(anyString(), anyString(), any())).thenReturn(jobResponse); + when(jobFeignClient.getJobByUUID(anyString(), anyString())).thenReturn(job); + doNothing().when(storagePoolDetailsDao).addDetail(anyLong(), anyString(), anyString(), eq(true)); + + // Execute + AccessGroup result = strategy.createAccessGroup(accessGroup); + + // Verify + assertNotNull(result); 
+ ArgumentCaptor policyCaptor = ArgumentCaptor.forClass(ExportPolicy.class); + verify(nasFeignClient).createExportPolicy(anyString(), policyCaptor.capture()); + ExportPolicy capturedPolicy = policyCaptor.getValue(); + assertEquals("192.168.1.10/32", capturedPolicy.getRules().get(0).getClients().get(0).getMatch()); + } + + // Test deleteAccessGroup - Success + @Test + public void testDeleteAccessGroup_Success() { + AccessGroup accessGroup = mock(AccessGroup.class); + Map details = new HashMap<>(); + details.put(OntapStorageConstants.EXPORT_POLICY_NAME, "export-policy-1"); + details.put(OntapStorageConstants.EXPORT_POLICY_ID, "1"); + + when(accessGroup.getStoragePoolId()).thenReturn(1L); + when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(details); + // Removed primaryDataStoreInfo.getName() - not used + doNothing().when(nasFeignClient).deleteExportPolicyById(anyString(), anyString()); + + // Execute + strategy.deleteAccessGroup(accessGroup); + + // Verify + verify(nasFeignClient).deleteExportPolicyById(anyString(), eq("1")); + } + + // Test deleteAccessGroup - Null AccessGroup + @Test + public void testDeleteAccessGroup_NullAccessGroup() { + assertThrows(CloudRuntimeException.class, () -> { + strategy.deleteAccessGroup(null); + }); + } + + // Test deleteAccessGroup - Null PrimaryDataStoreInfo + @Test + public void testDeleteAccessGroup_NullPrimaryDataStoreInfo() { + AccessGroup accessGroup = mock(AccessGroup.class); + when(accessGroup.getStoragePoolId()).thenReturn(null); + + assertThrows(CloudRuntimeException.class, () -> { + strategy.deleteAccessGroup(accessGroup); + }); + } + + // Test deleteAccessGroup - Failed to Delete + @Test + public void testDeleteAccessGroup_Failed() { + AccessGroup accessGroup = mock(AccessGroup.class); + Map details = new HashMap<>(); + details.put(OntapStorageConstants.EXPORT_POLICY_NAME, "export-policy-1"); + details.put(OntapStorageConstants.EXPORT_POLICY_ID, "1"); + + when(accessGroup.getStoragePoolId()).thenReturn(1L); 
+ when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(details); + doThrow(new RuntimeException("Failed to delete")).when(nasFeignClient) + .deleteExportPolicyById(anyString(), anyString()); + + assertThrows(CloudRuntimeException.class, () -> { + strategy.deleteAccessGroup(accessGroup); + }); + } + + // Test deleteCloudStackVolume - Success + @Test + public void testDeleteCloudStackVolume_Success() throws Exception { + CloudStackVolume cloudStackVolume = mock(CloudStackVolume.class); + VolumeInfo volumeInfo = mock(VolumeInfo.class); + EndPoint endpoint = mock(EndPoint.class); + Answer answer = mock(Answer.class); + + when(cloudStackVolume.getVolumeInfo()).thenReturn(volumeInfo); + when(epSelector.select(volumeInfo)).thenReturn(endpoint); + when(endpoint.sendMessage(any())).thenReturn(answer); + when(answer.getResult()).thenReturn(true); + + // Execute - should not throw exception + strategy.deleteCloudStackVolume(cloudStackVolume); + + // Verify endpoint was selected and message sent + verify(epSelector).select(volumeInfo); + verify(endpoint).sendMessage(any()); + } + + // Test deleteCloudStackVolume - Endpoint Not Found + @Test + public void testDeleteCloudStackVolume_EndpointNotFound() { + CloudStackVolume cloudStackVolume = mock(CloudStackVolume.class); + VolumeInfo volumeInfo = mock(VolumeInfo.class); + + when(cloudStackVolume.getVolumeInfo()).thenReturn(volumeInfo); + when(epSelector.select(volumeInfo)).thenReturn(null); + + assertThrows(CloudRuntimeException.class, () -> { + strategy.deleteCloudStackVolume(cloudStackVolume); + }); + } + + // Test deleteCloudStackVolume - Answer Result False + @Test + public void testDeleteCloudStackVolume_AnswerResultFalse() throws Exception { + CloudStackVolume cloudStackVolume = mock(CloudStackVolume.class); + VolumeInfo volumeInfo = mock(VolumeInfo.class); + EndPoint endpoint = mock(EndPoint.class); + Answer answer = mock(Answer.class); + + when(cloudStackVolume.getVolumeInfo()).thenReturn(volumeInfo); + 
when(epSelector.select(volumeInfo)).thenReturn(endpoint); + when(endpoint.sendMessage(any())).thenReturn(answer); + when(answer.getResult()).thenReturn(false); + when(answer.getDetails()).thenReturn("Failed to delete volume file"); + + assertThrows(CloudRuntimeException.class, () -> { + strategy.deleteCloudStackVolume(cloudStackVolume); + }); + } + + // Test deleteCloudStackVolume - Answer is Null + @Test + public void testDeleteCloudStackVolume_AnswerNull() throws Exception { + CloudStackVolume cloudStackVolume = mock(CloudStackVolume.class); + VolumeInfo volumeInfo = mock(VolumeInfo.class); + EndPoint endpoint = mock(EndPoint.class); + + when(cloudStackVolume.getVolumeInfo()).thenReturn(volumeInfo); + when(epSelector.select(volumeInfo)).thenReturn(endpoint); + when(endpoint.sendMessage(any())).thenReturn(null); + + assertThrows(CloudRuntimeException.class, () -> { + strategy.deleteCloudStackVolume(cloudStackVolume); + }); + } +} diff --git a/plugins/storage/volume/ontap/src/test/java/org/apache/cloudstack/storage/service/UnifiedSANStrategyTest.java b/plugins/storage/volume/ontap/src/test/java/org/apache/cloudstack/storage/service/UnifiedSANStrategyTest.java new file mode 100644 index 000000000000..b3f2364656a7 --- /dev/null +++ b/plugins/storage/volume/ontap/src/test/java/org/apache/cloudstack/storage/service/UnifiedSANStrategyTest.java @@ -0,0 +1,1807 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.cloudstack.storage.service; + +import com.cloud.host.HostVO; +import com.cloud.utils.exception.CloudRuntimeException; +import feign.FeignException; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.Scope; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; +import org.apache.cloudstack.storage.feign.client.SANFeignClient; +import org.apache.cloudstack.storage.feign.model.Igroup; +import org.apache.cloudstack.storage.feign.model.Initiator; +import org.apache.cloudstack.storage.feign.model.Lun; +import org.apache.cloudstack.storage.feign.model.LunMap; +import org.apache.cloudstack.storage.feign.model.OntapStorage; +import org.apache.cloudstack.storage.feign.model.response.OntapResponse; +import org.apache.cloudstack.storage.service.model.AccessGroup; +import org.apache.cloudstack.storage.service.model.CloudStackVolume; +import org.apache.cloudstack.storage.service.model.ProtocolType; +import org.apache.cloudstack.storage.utils.OntapStorageConstants; +import org.apache.cloudstack.storage.utils.OntapStorageUtils; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.ArgumentCaptor; +import org.mockito.Mock; +import org.mockito.MockedStatic; +import org.mockito.junit.jupiter.MockitoExtension; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static 
org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.ArgumentMatchers.anyMap; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.lenient; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.mockStatic; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +@ExtendWith(MockitoExtension.class) +class UnifiedSANStrategyTest { + + @Mock + private SANFeignClient sanFeignClient; + + @Mock + private OntapStorage ontapStorage; + + @Mock + private PrimaryDataStoreInfo primaryDataStoreInfo; + + @Mock + private Scope scope; + + @Mock + private StoragePoolDetailsDao storagePoolDetailsDao; + + private UnifiedSANStrategy unifiedSANStrategy; + private String authHeader; + + @BeforeEach + void setUp() { + lenient().when(ontapStorage.getStorageIP()).thenReturn("192.168.1.100"); + lenient().when(ontapStorage.getUsername()).thenReturn("admin"); + lenient().when(ontapStorage.getPassword()).thenReturn("password"); + lenient().when(ontapStorage.getSvmName()).thenReturn("svm1"); + + unifiedSANStrategy = new UnifiedSANStrategy(ontapStorage); + + // Use reflection to inject the mock SANFeignClient (field is in parent StorageStrategy class) + try { + java.lang.reflect.Field sanFeignClientField = StorageStrategy.class.getDeclaredField("sanFeignClient"); + 
sanFeignClientField.setAccessible(true); + sanFeignClientField.set(unifiedSANStrategy, sanFeignClient); + + // Also inject the storage field from parent class to ensure proper mocking + java.lang.reflect.Field storageField = StorageStrategy.class.getDeclaredField("storage"); + storageField.setAccessible(true); + storageField.set(unifiedSANStrategy, ontapStorage); + + // Inject storagePoolDetailsDao + java.lang.reflect.Field storagePoolDetailsDaoField = UnifiedSANStrategy.class.getDeclaredField("storagePoolDetailsDao"); + storagePoolDetailsDaoField.setAccessible(true); + storagePoolDetailsDaoField.set(unifiedSANStrategy, storagePoolDetailsDao); + } catch (Exception e) { + throw new RuntimeException(e); + } + + authHeader = "Basic YWRtaW46cGFzc3dvcmQ="; // Base64 encoded admin:password + } + + @Test + void testCreateCloudStackVolume_Success() { + // Setup + Lun lun = new Lun(); + lun.setName("/vol/vol1/lun1"); + + CloudStackVolume request = new CloudStackVolume(); + request.setLun(lun); + + Lun createdLun = new Lun(); + createdLun.setName("/vol/vol1/lun1"); + createdLun.setUuid("lun-uuid-123"); + + OntapResponse response = new OntapResponse<>(); + response.setRecords(List.of(createdLun)); + + try (MockedStatic utilityMock = mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.generateAuthHeader("admin", "password")) + .thenReturn(authHeader); + + when(sanFeignClient.createLun(eq(authHeader), eq(true), any(Lun.class))) + .thenReturn(response); + + // Execute + CloudStackVolume result = unifiedSANStrategy.createCloudStackVolume(request); + + // Verify + assertNotNull(result); + assertNotNull(result.getLun()); + assertEquals("lun-uuid-123", result.getLun().getUuid()); + assertEquals("/vol/vol1/lun1", result.getLun().getName()); + + verify(sanFeignClient).createLun(eq(authHeader), eq(true), any(Lun.class)); + } + } + + @Test + void testCreateCloudStackVolume_NullRequest_ThrowsException() { + assertThrows(CloudRuntimeException.class, + () -> 
unifiedSANStrategy.createCloudStackVolume(null)); + } + + @Test + void testCreateCloudStackVolume_FeignException_ThrowsCloudRuntimeException() { + // Setup + Lun lun = new Lun(); + lun.setName("/vol/vol1/lun1"); + CloudStackVolume request = new CloudStackVolume(); + request.setLun(lun); + + FeignException feignException = mock(FeignException.class); + when(feignException.status()).thenReturn(500); + when(feignException.getMessage()).thenReturn("Internal server error"); + + try (MockedStatic utilityMock = mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.generateAuthHeader("admin", "password")) + .thenReturn(authHeader); + + when(sanFeignClient.createLun(eq(authHeader), eq(true), any(Lun.class))) + .thenThrow(feignException); + + // Execute & Verify + assertThrows(CloudRuntimeException.class, + () -> unifiedSANStrategy.createCloudStackVolume(request)); + } + } + + @Test + void testDeleteCloudStackVolume_Success() { + // Setup + Lun lun = new Lun(); + lun.setName("/vol/vol1/lun1"); + lun.setUuid("lun-uuid-123"); + CloudStackVolume request = new CloudStackVolume(); + request.setLun(lun); + + try (MockedStatic utilityMock = mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.generateAuthHeader("admin", "password")) + .thenReturn(authHeader); + + doNothing().when(sanFeignClient).deleteLun(eq(authHeader), eq("lun-uuid-123"), anyMap()); + + // Execute + unifiedSANStrategy.deleteCloudStackVolume(request); + + // Verify + verify(sanFeignClient).deleteLun(eq(authHeader), eq("lun-uuid-123"), anyMap()); + } + } + + @Test + void testDeleteCloudStackVolume_NotFound_SkipsDeletion() { + // Setup + Lun lun = new Lun(); + lun.setName("/vol/vol1/lun1"); + lun.setUuid("lun-uuid-123"); + CloudStackVolume request = new CloudStackVolume(); + request.setLun(lun); + + FeignException feignException = mock(FeignException.class); + when(feignException.status()).thenReturn(404); + + try (MockedStatic utilityMock = 
mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.generateAuthHeader("admin", "password")) + .thenReturn(authHeader); + + doThrow(feignException).when(sanFeignClient).deleteLun(eq(authHeader), eq("lun-uuid-123"), anyMap()); + + // Execute - should not throw exception + assertDoesNotThrow(() -> unifiedSANStrategy.deleteCloudStackVolume(request)); + } + } + + @Test + void testGetCloudStackVolume_Success() { + // Setup + Map values = new HashMap<>(); + values.put(OntapStorageConstants.SVM_DOT_NAME, "svm1"); + values.put(OntapStorageConstants.NAME, "/vol/vol1/lun1"); + + Lun lun = new Lun(); + lun.setName("/vol/vol1/lun1"); + lun.setUuid("lun-uuid-123"); + + OntapResponse response = new OntapResponse<>(); + response.setRecords(List.of(lun)); + + try (MockedStatic utilityMock = mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.generateAuthHeader("admin", "password")) + .thenReturn(authHeader); + + when(sanFeignClient.getLunResponse(eq(authHeader), anyMap())).thenReturn(response); + + // Execute + CloudStackVolume result = unifiedSANStrategy.getCloudStackVolume(values); + + // Verify + assertNotNull(result); + assertNotNull(result.getLun()); + assertEquals("lun-uuid-123", result.getLun().getUuid()); + assertEquals("/vol/vol1/lun1", result.getLun().getName()); + } + } + + @Test + void testGetCloudStackVolume_NotFound_ReturnsNull() { + // Setup + Map values = new HashMap<>(); + values.put(OntapStorageConstants.SVM_DOT_NAME, "svm1"); + values.put(OntapStorageConstants.NAME, "/vol/vol1/lun1"); + + OntapResponse response = new OntapResponse<>(); + response.setRecords(new ArrayList<>()); + + try (MockedStatic utilityMock = mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.generateAuthHeader("admin", "password")) + .thenReturn(authHeader); + + when(sanFeignClient.getLunResponse(eq(authHeader), anyMap())).thenReturn(response); + + // Execute + CloudStackVolume result = 
unifiedSANStrategy.getCloudStackVolume(values); + + // Verify + assertNull(result); + } + } + + @Test + void testCreateAccessGroup_Success() { + // Setup + AccessGroup accessGroup = new AccessGroup(); + accessGroup.setStoragePoolId(1L); + accessGroup.setScope(scope); + + Map details = new HashMap<>(); + details.put(OntapStorageConstants.SVM_NAME, "svm1"); + details.put(OntapStorageConstants.PROTOCOL, ProtocolType.ISCSI.name()); + + when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(details); + + List hosts = new ArrayList<>(); + HostVO host1 = mock(HostVO.class); + when(host1.getName()).thenReturn("host1"); + when(host1.getStorageUrl()).thenReturn("iqn.1993-08.org.debian:01:host1"); + hosts.add(host1); + accessGroup.setHostsToConnect(hosts); + + Igroup createdIgroup = new Igroup(); + createdIgroup.setName("igroup1"); + + OntapResponse response = new OntapResponse<>(); + response.setRecords(List.of(createdIgroup)); + + try (MockedStatic utilityMock = mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.generateAuthHeader("admin", "password")) + .thenReturn(authHeader); + utilityMock.when(() -> OntapStorageUtils.getIgroupName("svm1", "host1")) + .thenReturn("igroup1"); + + when(sanFeignClient.createIgroup(eq(authHeader), eq(true), any(Igroup.class))) + .thenReturn(response); + + // Execute + AccessGroup result = unifiedSANStrategy.createAccessGroup(accessGroup); + + // Verify + assertNotNull(result); + assertNotNull(result.getIgroup()); + assertEquals("igroup1", result.getIgroup().getName()); + + ArgumentCaptor igroupCaptor = ArgumentCaptor.forClass(Igroup.class); + verify(sanFeignClient).createIgroup(eq(authHeader), eq(true), igroupCaptor.capture()); + + Igroup capturedIgroup = igroupCaptor.getValue(); + assertEquals("igroup1", capturedIgroup.getName()); + assertNotNull(capturedIgroup.getInitiators()); + assertEquals(1, capturedIgroup.getInitiators().size()); + } + } + + @Test + void 
testCreateAccessGroup_AlreadyExists_ReturnsSuccessfully() { + // Setup + AccessGroup accessGroup = new AccessGroup(); + accessGroup.setStoragePoolId(1L); + accessGroup.setScope(scope); + + Map details = new HashMap<>(); + details.put(OntapStorageConstants.SVM_NAME, "svm1"); + details.put(OntapStorageConstants.PROTOCOL, ProtocolType.ISCSI.name()); + + when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(details); + + List hosts = new ArrayList<>(); + HostVO host1 = mock(HostVO.class); + when(host1.getName()).thenReturn("host1"); + when(host1.getStorageUrl()).thenReturn("iqn.1993-08.org.debian:01:host1"); + hosts.add(host1); + accessGroup.setHostsToConnect(hosts); + + FeignException feignException = mock(FeignException.class); + when(feignException.status()).thenReturn(409); + + try (MockedStatic utilityMock = mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.generateAuthHeader("admin", "password")) + .thenReturn(authHeader); + utilityMock.when(() -> OntapStorageUtils.getIgroupName("svm1", "host1")) + .thenReturn("igroup1"); + + when(sanFeignClient.createIgroup(eq(authHeader), eq(true), any(Igroup.class))) + .thenThrow(feignException); + + // Execute + AccessGroup result = unifiedSANStrategy.createAccessGroup(accessGroup); + + // Verify - should not throw exception + assertNotNull(result); + } + } + + @Test + void testDeleteAccessGroup_Success() { + // Setup + AccessGroup accessGroup = new AccessGroup(); + accessGroup.setStoragePoolId(1L); + + Map details = new HashMap<>(); + details.put(OntapStorageConstants.SVM_NAME, "svm1"); + details.put(OntapStorageConstants.PROTOCOL, ProtocolType.ISCSI.name()); + + lenient().when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(details); + + List hosts = new ArrayList<>(); + HostVO host1 = mock(HostVO.class); + lenient().when(host1.getName()).thenReturn("host1"); + hosts.add(host1); + accessGroup.setHostsToConnect(hosts); + + Igroup igroup = new Igroup(); + 
igroup.setName("igroup1"); + // Use reflection to set UUID since there's no setter + try { + java.lang.reflect.Field uuidField = Igroup.class.getDeclaredField("uuid"); + uuidField.setAccessible(true); + uuidField.set(igroup, "igroup-uuid-123"); + } catch (Exception e) { + throw new RuntimeException(e); + } + + OntapResponse response = new OntapResponse<>(); + response.setRecords(List.of(igroup)); + + try (MockedStatic utilityMock = mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.generateAuthHeader("admin", "password")) + .thenReturn(authHeader); + utilityMock.when(() -> OntapStorageUtils.getIgroupName("svm1", "host1")) + .thenReturn("igroup1"); + + when(sanFeignClient.getIgroupResponse(eq(authHeader), anyMap())).thenReturn(response); + + // Execute + unifiedSANStrategy.deleteAccessGroup(accessGroup); + + // Verify + verify(sanFeignClient).deleteIgroup(eq(authHeader), eq("igroup-uuid-123")); + } + } + + @Test + void testDeleteAccessGroup_NotFound_SkipsDeletion() { + // Setup + AccessGroup accessGroup = new AccessGroup(); + accessGroup.setStoragePoolId(1L); + + Map details = new HashMap<>(); + details.put(OntapStorageConstants.SVM_NAME, "svm1"); + details.put(OntapStorageConstants.PROTOCOL, ProtocolType.ISCSI.name()); + + lenient().when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(details); + + List hosts = new ArrayList<>(); + HostVO host1 = mock(HostVO.class); + lenient().when(host1.getName()).thenReturn("host1"); + hosts.add(host1); + accessGroup.setHostsToConnect(hosts); + + FeignException feignException = mock(FeignException.class); + when(feignException.status()).thenReturn(404); + + try (MockedStatic utilityMock = mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.generateAuthHeader("admin", "password")) + .thenReturn(authHeader); + utilityMock.when(() -> OntapStorageUtils.getIgroupName("svm1", "host1")) + .thenReturn("igroup1"); + + 
when(sanFeignClient.getIgroupResponse(eq(authHeader), anyMap())).thenThrow(feignException); + + // Execute - should not throw exception + assertDoesNotThrow(() -> unifiedSANStrategy.deleteAccessGroup(accessGroup)); + } + } + + @Test + void testGetAccessGroup_Success() { + // Setup + Map values = new HashMap<>(); + values.put(OntapStorageConstants.SVM_DOT_NAME, "svm1"); + values.put(OntapStorageConstants.NAME, "igroup1"); + + Igroup igroup = new Igroup(); + igroup.setName("igroup1"); + + OntapResponse response = new OntapResponse<>(); + response.setRecords(List.of(igroup)); + + try (MockedStatic utilityMock = mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.generateAuthHeader("admin", "password")) + .thenReturn(authHeader); + + when(sanFeignClient.getIgroupResponse(eq(authHeader), anyMap())).thenReturn(response); + + // Execute + AccessGroup result = unifiedSANStrategy.getAccessGroup(values); + + // Verify + assertNotNull(result); + assertNotNull(result.getIgroup()); + assertEquals("igroup1", result.getIgroup().getName()); + } + } + + @Test + void testEnableLogicalAccess_Success() { + // Setup + Map values = new HashMap<>(); + values.put(OntapStorageConstants.SVM_DOT_NAME, "svm1"); + values.put(OntapStorageConstants.LUN_DOT_NAME, "/vol/vol1/lun1"); + values.put(OntapStorageConstants.IGROUP_DOT_NAME, "igroup1"); + + LunMap lunMap = new LunMap(); + lunMap.setLogicalUnitNumber(0); + + OntapResponse response = new OntapResponse<>(); + response.setRecords(List.of(lunMap)); + + try (MockedStatic utilityMock = mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.generateAuthHeader("admin", "password")) + .thenReturn(authHeader); + + when(sanFeignClient.createLunMap(eq(authHeader), eq(true), any(LunMap.class))) + .thenReturn(new OntapResponse<>()); + when(sanFeignClient.getLunMapResponse(eq(authHeader), anyMap())).thenReturn(response); + + // Execute + Map result = unifiedSANStrategy.enableLogicalAccess(values); 
+ + // Verify + assertNotNull(result); + assertTrue(result.containsKey(OntapStorageConstants.LOGICAL_UNIT_NUMBER)); + assertEquals("0", result.get(OntapStorageConstants.LOGICAL_UNIT_NUMBER)); + + verify(sanFeignClient).createLunMap(eq(authHeader), eq(true), any(LunMap.class)); + } + } + + @Test + void testEnableLogicalAccess_AlreadyMapped_ReturnsLunNumber() { + // Setup + Map values = new HashMap<>(); + values.put(OntapStorageConstants.SVM_DOT_NAME, "svm1"); + values.put(OntapStorageConstants.LUN_DOT_NAME, "/vol/vol1/lun1"); + values.put(OntapStorageConstants.IGROUP_DOT_NAME, "igroup1"); + + LunMap lunMap = new LunMap(); + lunMap.setLogicalUnitNumber(5); + + OntapResponse response = new OntapResponse<>(); + response.setRecords(List.of(lunMap)); + + try (MockedStatic utilityMock = mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.generateAuthHeader("admin", "password")) + .thenReturn(authHeader); + + Exception exception = new RuntimeException("LUN already mapped to this group"); + doThrow(exception).when(sanFeignClient).createLunMap(eq(authHeader), eq(true), any(LunMap.class)); + when(sanFeignClient.getLunMapResponse(eq(authHeader), anyMap())).thenReturn(response); + + // Execute + Map result = unifiedSANStrategy.enableLogicalAccess(values); + + // Verify + assertNotNull(result); + assertEquals("5", result.get(OntapStorageConstants.LOGICAL_UNIT_NUMBER)); + } + } + + @Test + void testDisableLogicalAccess_Success() { + // Setup + Map values = new HashMap<>(); + values.put(OntapStorageConstants.LUN_DOT_UUID, "lun-uuid-123"); + values.put(OntapStorageConstants.IGROUP_DOT_UUID, "igroup-uuid-123"); + + try (MockedStatic utilityMock = mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.generateAuthHeader("admin", "password")) + .thenReturn(authHeader); + + doNothing().when(sanFeignClient).deleteLunMap(eq(authHeader), eq("lun-uuid-123"), eq("igroup-uuid-123")); + + // Execute + 
unifiedSANStrategy.disableLogicalAccess(values); + + // Verify + verify(sanFeignClient).deleteLunMap(eq(authHeader), eq("lun-uuid-123"), eq("igroup-uuid-123")); + } + } + + @Test + void testDisableLogicalAccess_NotFound_SkipsDeletion() { + // Setup + Map values = new HashMap<>(); + values.put(OntapStorageConstants.LUN_DOT_UUID, "lun-uuid-123"); + values.put(OntapStorageConstants.IGROUP_DOT_UUID, "igroup-uuid-123"); + + FeignException feignException = mock(FeignException.class); + when(feignException.status()).thenReturn(404); + + try (MockedStatic utilityMock = mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.generateAuthHeader("admin", "password")) + .thenReturn(authHeader); + + doThrow(feignException).when(sanFeignClient).deleteLunMap(eq(authHeader), eq("lun-uuid-123"), eq("igroup-uuid-123")); + + // Execute - should not throw exception + assertDoesNotThrow(() -> unifiedSANStrategy.disableLogicalAccess(values)); + } + } + + @Test + void testGetLogicalAccess_Success() { + // Setup + Map values = new HashMap<>(); + values.put(OntapStorageConstants.SVM_DOT_NAME, "svm1"); + values.put(OntapStorageConstants.LUN_DOT_NAME, "/vol/vol1/lun1"); + values.put(OntapStorageConstants.IGROUP_DOT_NAME, "igroup1"); + + LunMap lunMap = new LunMap(); + lunMap.setLogicalUnitNumber(3); + + OntapResponse response = new OntapResponse<>(); + response.setRecords(List.of(lunMap)); + + try (MockedStatic utilityMock = mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.generateAuthHeader("admin", "password")) + .thenReturn(authHeader); + + when(sanFeignClient.getLunMapResponse(eq(authHeader), anyMap())).thenReturn(response); + + // Execute + Map result = unifiedSANStrategy.getLogicalAccess(values); + + // Verify + assertNotNull(result); + assertEquals("3", result.get(OntapStorageConstants.LOGICAL_UNIT_NUMBER)); + } + } + + @Test + void testGetLogicalAccess_NotFound_ReturnsNull() { + // Setup + Map values = new HashMap<>(); + 
values.put(OntapStorageConstants.SVM_DOT_NAME, "svm1"); + values.put(OntapStorageConstants.LUN_DOT_NAME, "/vol/vol1/lun1"); + values.put(OntapStorageConstants.IGROUP_DOT_NAME, "igroup1"); + + try (MockedStatic utilityMock = mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.generateAuthHeader("admin", "password")) + .thenReturn(authHeader); + + when(sanFeignClient.getLunMapResponse(eq(authHeader), anyMap())) + .thenThrow(new RuntimeException("Not found")); + + // Execute + Map result = unifiedSANStrategy.getLogicalAccess(values); + + // Verify + assertNull(result); + } + } + + @Test + void testEnsureLunMapped_ExistingMapping_ReturnsLunNumber() { + // Setup + String svmName = "svm1"; + String lunName = "/vol/vol1/lun1"; + String accessGroupName = "igroup1"; + + LunMap lunMap = new LunMap(); + lunMap.setLogicalUnitNumber(2); + + OntapResponse response = new OntapResponse<>(); + response.setRecords(List.of(lunMap)); + + try (MockedStatic utilityMock = mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.generateAuthHeader("admin", "password")) + .thenReturn(authHeader); + + when(sanFeignClient.getLunMapResponse(eq(authHeader), anyMap())).thenReturn(response); + + // Execute + String result = unifiedSANStrategy.ensureLunMapped(svmName, lunName, accessGroupName); + + // Verify + assertEquals("2", result); + verify(sanFeignClient, never()).createLunMap(any(), anyBoolean(), any()); + } + } + + @Test + void testEnsureLunMapped_CreatesNewMapping_ReturnsLunNumber() { + // Setup + String svmName = "svm1"; + String lunName = "/vol/vol1/lun1"; + String accessGroupName = "igroup1"; + + LunMap lunMap = new LunMap(); + lunMap.setLogicalUnitNumber(4); + + OntapResponse emptyResponse = new OntapResponse<>(); + emptyResponse.setRecords(new ArrayList<>()); + + OntapResponse response = new OntapResponse<>(); + response.setRecords(List.of(lunMap)); + + try (MockedStatic utilityMock = mockStatic(OntapStorageUtils.class)) { + 
utilityMock.when(() -> OntapStorageUtils.generateAuthHeader("admin", "password")) + .thenReturn(authHeader); + + // First call returns empty (no existing mapping), second call returns the new mapping + when(sanFeignClient.getLunMapResponse(eq(authHeader), anyMap())) + .thenReturn(emptyResponse) // First call - no records + .thenReturn(response); // Second call after creation + + when(sanFeignClient.createLunMap(eq(authHeader), eq(true), any(LunMap.class))) + .thenReturn(new OntapResponse<>()); + + // Execute + String result = unifiedSANStrategy.ensureLunMapped(svmName, lunName, accessGroupName); + + // Verify + assertEquals("4", result); + verify(sanFeignClient).createLunMap(eq(authHeader), eq(true), any(LunMap.class)); + } + } + + @Test + void testValidateInitiatorInAccessGroup_InitiatorFound_ReturnsTrue() { + // Setup + String hostInitiator = "iqn.1993-08.org.debian:01:host1"; + String svmName = "svm1"; + String accessGroupName = "igroup1"; + + Initiator initiator = new Initiator(); + initiator.setName(hostInitiator); + + Igroup igroup = new Igroup(); + igroup.setName(accessGroupName); + igroup.setInitiators(List.of(initiator)); + + // Execute + boolean result = unifiedSANStrategy.validateInitiatorInAccessGroup(hostInitiator, svmName, igroup); + + // Verify + assertTrue(result); + } + + @Test + void testValidateInitiatorInAccessGroup_InitiatorNotFound_ReturnsFalse() { + // Setup + String hostInitiator = "iqn.1993-08.org.debian:01:host1"; + String svmName = "svm1"; + String accessGroupName = "igroup1"; + + Initiator differentInitiator = new Initiator(); + differentInitiator.setName("iqn.1993-08.org.debian:01:host2"); + + Igroup igroup = new Igroup(); + igroup.setName(accessGroupName); + igroup.setInitiators(List.of(differentInitiator)); + + // Execute + boolean result = unifiedSANStrategy.validateInitiatorInAccessGroup(hostInitiator, svmName, igroup); + + // Verify + assertFalse(result); + } + + @Test + void 
testValidateInitiatorInAccessGroup_EmptyInitiator_ReturnsFalse() { + Igroup igroup = new Igroup(); + String accessGroupName = "igroup1"; + igroup.setName(accessGroupName); + Initiator differentInitiator = new Initiator(); + differentInitiator.setName("iqn.1993-08.org.debian:01:host2"); + igroup.setInitiators(List.of(differentInitiator)); + boolean result = unifiedSANStrategy.validateInitiatorInAccessGroup("", "svm1", igroup); + assertFalse(result); + + result = unifiedSANStrategy.validateInitiatorInAccessGroup(null, "svm1", igroup); + assertFalse(result); + } + + @Test + void testValidateInitiatorInAccessGroup_IgroupNotFound_ReturnsFalse() { + // Setup + String hostInitiator = "iqn.1993-08.org.debian:01:host1"; + + Igroup emptyIgroup = new Igroup(); + emptyIgroup.setName("igroup1"); + emptyIgroup.setInitiators(null); + + // Execute + boolean result = unifiedSANStrategy.validateInitiatorInAccessGroup(hostInitiator, "svm1", emptyIgroup); + + // Verify + assertFalse(result); + } + + @Test + void testCopyCloudStackVolume_NullRequest_DoesNotThrow() { + // copyCloudStackVolume is not yet implemented (no-op), so it should not throw + assertDoesNotThrow(() -> unifiedSANStrategy.copyCloudStackVolume(null)); + } + + @Test + void testCopyCloudStackVolume_NullLun_DoesNotThrow() { + // copyCloudStackVolume is not yet implemented (no-op), so it should not throw + CloudStackVolume request = new CloudStackVolume(); + request.setLun(null); + + assertDoesNotThrow(() -> unifiedSANStrategy.copyCloudStackVolume(request)); + } + + @Test + void testCopyCloudStackVolume_ValidRequest_DoesNotThrow() { + // copyCloudStackVolume is not yet implemented (no-op), so it should not throw + Lun lun = new Lun(); + lun.setName("/vol/vol1/lun1"); + CloudStackVolume request = new CloudStackVolume(); + request.setLun(lun); + + assertDoesNotThrow(() -> unifiedSANStrategy.copyCloudStackVolume(request)); + } + + @Test + void testSetOntapStorage() { + OntapStorage newStorage = mock(OntapStorage.class); + 
assertDoesNotThrow(() -> unifiedSANStrategy.setOntapStorage(newStorage)); + } + + @Test + void testUpdateCloudStackVolume_ReturnsNull() { + CloudStackVolume request = new CloudStackVolume(); + CloudStackVolume result = unifiedSANStrategy.updateCloudStackVolume(request); + assertNull(result); + } + + @Test + void testUpdateAccessGroup_ReturnsNull() { + AccessGroup accessGroup = new AccessGroup(); + AccessGroup result = unifiedSANStrategy.updateAccessGroup(accessGroup); + assertNull(result); + } + + @Test + void testCreateAccessGroup_NullPrimaryDataStoreInfo_ThrowsException() { + AccessGroup accessGroup = new AccessGroup(); + assertThrows(CloudRuntimeException.class, + () -> unifiedSANStrategy.createAccessGroup(accessGroup)); + } + + @Test + void testCreateAccessGroup_NullDetails_ThrowsException() { + AccessGroup accessGroup = new AccessGroup(); + accessGroup.setStoragePoolId(1L); + lenient().when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(null); + + assertThrows(CloudRuntimeException.class, + () -> unifiedSANStrategy.createAccessGroup(accessGroup)); + } + + @Test + void testCreateAccessGroup_EmptyDetails_ThrowsException() { + AccessGroup accessGroup = new AccessGroup(); + accessGroup.setStoragePoolId(1L); + lenient().when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(new HashMap<>()); + + assertThrows(CloudRuntimeException.class, + () -> unifiedSANStrategy.createAccessGroup(accessGroup)); + } + + @Test + void testCreateAccessGroup_NullHostsToConnect_ThrowsException() { + AccessGroup accessGroup = new AccessGroup(); + accessGroup.setStoragePoolId(1L); + accessGroup.setScope(scope); + + Map details = new HashMap<>(); + details.put(OntapStorageConstants.SVM_NAME, "svm1"); + details.put(OntapStorageConstants.PROTOCOL, ProtocolType.ISCSI.name()); + + lenient().when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(details); + + accessGroup.setHostsToConnect(null); + + assertThrows(CloudRuntimeException.class, + () -> 
unifiedSANStrategy.createAccessGroup(accessGroup)); + } + + @Test + void testCreateAccessGroup_EmptyHostsToConnect_ThrowsException() { + AccessGroup accessGroup = new AccessGroup(); + accessGroup.setStoragePoolId(1L); + accessGroup.setScope(scope); + + Map details = new HashMap<>(); + details.put(OntapStorageConstants.SVM_NAME, "svm1"); + details.put(OntapStorageConstants.PROTOCOL, ProtocolType.ISCSI.name()); + + lenient().when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(details); + + accessGroup.setHostsToConnect(new ArrayList<>()); + + assertThrows(CloudRuntimeException.class, + () -> unifiedSANStrategy.createAccessGroup(accessGroup)); + } + + @Test + void testCreateAccessGroup_HostWithoutIQN_ThrowsException() { + AccessGroup accessGroup = new AccessGroup(); + accessGroup.setStoragePoolId(1L); + accessGroup.setScope(scope); + + lenient().when(scope.getScopeType()).thenReturn(com.cloud.storage.ScopeType.CLUSTER); + + Map details = new HashMap<>(); + details.put(OntapStorageConstants.SVM_NAME, "svm1"); + details.put(OntapStorageConstants.PROTOCOL, ProtocolType.ISCSI.name()); + + lenient().when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(details); + + List hosts = new ArrayList<>(); + HostVO host1 = mock(HostVO.class); + when(host1.getStorageUrl()).thenReturn("invalid-storage-url"); + hosts.add(host1); + accessGroup.setHostsToConnect(hosts); + + assertThrows(CloudRuntimeException.class, + () -> unifiedSANStrategy.createAccessGroup(accessGroup)); + } + + @Test + void testCreateAccessGroup_HostWithNullStorageUrl_ThrowsException() { + AccessGroup accessGroup = new AccessGroup(); + accessGroup.setStoragePoolId(1L); + accessGroup.setScope(scope); + + lenient().when(scope.getScopeType()).thenReturn(com.cloud.storage.ScopeType.CLUSTER); + + Map details = new HashMap<>(); + details.put(OntapStorageConstants.SVM_NAME, "svm1"); + details.put(OntapStorageConstants.PROTOCOL, ProtocolType.ISCSI.name()); + + 
lenient().when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(details); + + List hosts = new ArrayList<>(); + HostVO host1 = mock(HostVO.class); + when(host1.getStorageUrl()).thenReturn(null); + hosts.add(host1); + accessGroup.setHostsToConnect(hosts); + + assertThrows(CloudRuntimeException.class, + () -> unifiedSANStrategy.createAccessGroup(accessGroup)); + } + + @Test + void testCreateAccessGroup_FeignExceptionNon409_ThrowsException() { + AccessGroup accessGroup = new AccessGroup(); + accessGroup.setStoragePoolId(1L); + accessGroup.setScope(scope); + + Map details = new HashMap<>(); + details.put(OntapStorageConstants.SVM_NAME, "svm1"); + details.put(OntapStorageConstants.PROTOCOL, ProtocolType.ISCSI.name()); + + lenient().when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(details); + + List hosts = new ArrayList<>(); + HostVO host1 = mock(HostVO.class); + when(host1.getStorageUrl()).thenReturn("iqn.1993-08.org.debian:01:host1"); + hosts.add(host1); + accessGroup.setHostsToConnect(hosts); + + FeignException feignException = mock(FeignException.class); + when(feignException.status()).thenReturn(500); + when(feignException.getMessage()).thenReturn("Internal server error"); + + try (MockedStatic utilityMock = mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.generateAuthHeader("admin", "password")) + .thenReturn(authHeader); + utilityMock.when(() -> OntapStorageUtils.getIgroupName("svm1", "host1")) + .thenReturn("igroup1"); + + when(sanFeignClient.createIgroup(eq(authHeader), eq(true), any(Igroup.class))) + .thenThrow(feignException); + + assertThrows(CloudRuntimeException.class, + () -> unifiedSANStrategy.createAccessGroup(accessGroup)); + } + } + + @Test + void testCreateAccessGroup_EmptyResponseRecords_ThrowsException() { + AccessGroup accessGroup = new AccessGroup(); + accessGroup.setStoragePoolId(1L); + accessGroup.setScope(scope); + + Map details = new HashMap<>(); + 
details.put(OntapStorageConstants.SVM_NAME, "svm1"); + details.put(OntapStorageConstants.PROTOCOL, ProtocolType.ISCSI.name()); + + lenient().when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(details); + + List hosts = new ArrayList<>(); + HostVO host1 = mock(HostVO.class); + when(host1.getStorageUrl()).thenReturn("iqn.1993-08.org.debian:01:host1"); + hosts.add(host1); + accessGroup.setHostsToConnect(hosts); + + OntapResponse response = new OntapResponse<>(); + response.setRecords(new ArrayList<>()); + + try (MockedStatic utilityMock = mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.generateAuthHeader("admin", "password")) + .thenReturn(authHeader); + utilityMock.when(() -> OntapStorageUtils.getIgroupName("svm1", "host1")) + .thenReturn("igroup1"); + + when(sanFeignClient.createIgroup(eq(authHeader), eq(true), any(Igroup.class))) + .thenReturn(response); + + assertThrows(CloudRuntimeException.class, + () -> unifiedSANStrategy.createAccessGroup(accessGroup)); + } + } + + @Test + void testDeleteAccessGroup_NullAccessGroup_ThrowsException() { + assertThrows(CloudRuntimeException.class, + () -> unifiedSANStrategy.deleteAccessGroup(null)); + } + + @Test + void testDeleteAccessGroup_NullPrimaryDataStoreInfo_ThrowsException() { + AccessGroup accessGroup = new AccessGroup(); + assertThrows(CloudRuntimeException.class, + () -> unifiedSANStrategy.deleteAccessGroup(accessGroup)); + } + + @Test + void testDeleteAccessGroup_EmptyIgroupUuid_ThrowsException() { + AccessGroup accessGroup = new AccessGroup(); + accessGroup.setStoragePoolId(1L); + + Map details = new HashMap<>(); + details.put(OntapStorageConstants.SVM_NAME, "svm1"); + details.put(OntapStorageConstants.PROTOCOL, ProtocolType.ISCSI.name()); + + lenient().when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(details); + + List hosts = new ArrayList<>(); + HostVO host1 = mock(HostVO.class); + when(host1.getName()).thenReturn("host1"); + hosts.add(host1); + 
accessGroup.setHostsToConnect(hosts); + + Igroup igroup = new Igroup(); + igroup.setName("igroup1"); + // UUID is null + + OntapResponse response = new OntapResponse<>(); + response.setRecords(List.of(igroup)); + + try (MockedStatic utilityMock = mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.generateAuthHeader("admin", "password")) + .thenReturn(authHeader); + utilityMock.when(() -> OntapStorageUtils.getIgroupName("svm1", "host1")) + .thenReturn("igroup1"); + + when(sanFeignClient.getIgroupResponse(eq(authHeader), anyMap())).thenReturn(response); + + assertThrows(CloudRuntimeException.class, + () -> unifiedSANStrategy.deleteAccessGroup(accessGroup)); + } + } + + @Test + void testDeleteAccessGroup_FeignExceptionNon404_ThrowsException() { + AccessGroup accessGroup = new AccessGroup(); + accessGroup.setStoragePoolId(1L); + + Map details = new HashMap<>(); + details.put(OntapStorageConstants.SVM_NAME, "svm1"); + details.put(OntapStorageConstants.PROTOCOL, ProtocolType.ISCSI.name()); + + lenient().when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(details); + + List hosts = new ArrayList<>(); + HostVO host1 = mock(HostVO.class); + when(host1.getName()).thenReturn("host1"); + hosts.add(host1); + accessGroup.setHostsToConnect(hosts); + + FeignException feignException = mock(FeignException.class); + when(feignException.status()).thenReturn(500); + when(feignException.getMessage()).thenReturn("Internal server error"); + + try (MockedStatic utilityMock = mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.generateAuthHeader("admin", "password")) + .thenReturn(authHeader); + utilityMock.when(() -> OntapStorageUtils.getIgroupName("svm1", "host1")) + .thenReturn("igroup1"); + + when(sanFeignClient.getIgroupResponse(eq(authHeader), anyMap())).thenThrow(feignException); + + assertThrows(CloudRuntimeException.class, + () -> unifiedSANStrategy.deleteAccessGroup(accessGroup)); + } + } + + @Test + void 
testGetAccessGroup_NullValues_ThrowsException() { + assertThrows(CloudRuntimeException.class, + () -> unifiedSANStrategy.getAccessGroup(null)); + } + + @Test + void testGetAccessGroup_EmptyValues_ThrowsException() { + assertThrows(CloudRuntimeException.class, + () -> unifiedSANStrategy.getAccessGroup(new HashMap<>())); + } + + @Test + void testGetAccessGroup_NullSvmName_ThrowsException() { + Map values = new HashMap<>(); + values.put(OntapStorageConstants.NAME, "igroup1"); + assertThrows(CloudRuntimeException.class, + () -> unifiedSANStrategy.getAccessGroup(values)); + } + + @Test + void testGetAccessGroup_NullIgroupName_ThrowsException() { + Map values = new HashMap<>(); + values.put(OntapStorageConstants.SVM_DOT_NAME, "svm1"); + assertThrows(CloudRuntimeException.class, + () -> unifiedSANStrategy.getAccessGroup(values)); + } + + @Test + void testGetAccessGroup_FeignExceptionNon404_ThrowsException() { + Map values = new HashMap<>(); + values.put(OntapStorageConstants.SVM_DOT_NAME, "svm1"); + values.put(OntapStorageConstants.NAME, "igroup1"); + + FeignException feignException = mock(FeignException.class); + when(feignException.status()).thenReturn(500); + when(feignException.getMessage()).thenReturn("Internal server error"); + + try (MockedStatic utilityMock = mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.generateAuthHeader("admin", "password")) + .thenReturn(authHeader); + + when(sanFeignClient.getIgroupResponse(eq(authHeader), anyMap())).thenThrow(feignException); + + assertThrows(CloudRuntimeException.class, + () -> unifiedSANStrategy.getAccessGroup(values)); + } + } + + @Test + void testGetCloudStackVolume_NullValues_ThrowsException() { + assertThrows(CloudRuntimeException.class, + () -> unifiedSANStrategy.getCloudStackVolume(null)); + } + + @Test + void testGetCloudStackVolume_EmptyValues_ThrowsException() { + assertThrows(CloudRuntimeException.class, + () -> unifiedSANStrategy.getCloudStackVolume(new HashMap<>())); + } + + 
@Test + void testGetCloudStackVolume_NullSvmName_ThrowsException() { + Map values = new HashMap<>(); + values.put(OntapStorageConstants.NAME, "/vol/vol1/lun1"); + assertThrows(CloudRuntimeException.class, + () -> unifiedSANStrategy.getCloudStackVolume(values)); + } + + @Test + void testGetCloudStackVolume_NullLunName_ThrowsException() { + Map values = new HashMap<>(); + values.put(OntapStorageConstants.SVM_DOT_NAME, "svm1"); + assertThrows(CloudRuntimeException.class, + () -> unifiedSANStrategy.getCloudStackVolume(values)); + } + + @Test + void testGetCloudStackVolume_FeignExceptionNon404_ThrowsException() { + Map values = new HashMap<>(); + values.put(OntapStorageConstants.SVM_DOT_NAME, "svm1"); + values.put(OntapStorageConstants.NAME, "/vol/vol1/lun1"); + + FeignException feignException = mock(FeignException.class); + when(feignException.status()).thenReturn(500); + when(feignException.getMessage()).thenReturn("Internal server error"); + + try (MockedStatic utilityMock = mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.generateAuthHeader("admin", "password")) + .thenReturn(authHeader); + + when(sanFeignClient.getLunResponse(eq(authHeader), anyMap())).thenThrow(feignException); + + assertThrows(CloudRuntimeException.class, + () -> unifiedSANStrategy.getCloudStackVolume(values)); + } + } + + @Test + void testEnableLogicalAccess_NullValues_ThrowsException() { + assertThrows(CloudRuntimeException.class, + () -> unifiedSANStrategy.enableLogicalAccess(null)); + } + + @Test + void testEnableLogicalAccess_MissingSvmName_ThrowsException() { + Map values = new HashMap<>(); + values.put(OntapStorageConstants.LUN_DOT_NAME, "/vol/vol1/lun1"); + values.put(OntapStorageConstants.IGROUP_DOT_NAME, "igroup1"); + assertThrows(CloudRuntimeException.class, + () -> unifiedSANStrategy.enableLogicalAccess(values)); + } + + @Test + void testEnableLogicalAccess_MissingLunName_ThrowsException() { + Map values = new HashMap<>(); + 
values.put(OntapStorageConstants.SVM_DOT_NAME, "svm1"); + values.put(OntapStorageConstants.IGROUP_DOT_NAME, "igroup1"); + assertThrows(CloudRuntimeException.class, + () -> unifiedSANStrategy.enableLogicalAccess(values)); + } + + @Test + void testEnableLogicalAccess_MissingIgroupName_ThrowsException() { + Map values = new HashMap<>(); + values.put(OntapStorageConstants.SVM_DOT_NAME, "svm1"); + values.put(OntapStorageConstants.LUN_DOT_NAME, "/vol/vol1/lun1"); + assertThrows(CloudRuntimeException.class, + () -> unifiedSANStrategy.enableLogicalAccess(values)); + } + + @Test + void testEnableLogicalAccess_FetchLunMapFails_ThrowsException() { + Map values = new HashMap<>(); + values.put(OntapStorageConstants.SVM_DOT_NAME, "svm1"); + values.put(OntapStorageConstants.LUN_DOT_NAME, "/vol/vol1/lun1"); + values.put(OntapStorageConstants.IGROUP_DOT_NAME, "igroup1"); + + try (MockedStatic utilityMock = mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.generateAuthHeader("admin", "password")) + .thenReturn(authHeader); + + when(sanFeignClient.createLunMap(eq(authHeader), eq(true), any(LunMap.class))) + .thenReturn(new OntapResponse<>()); + when(sanFeignClient.getLunMapResponse(eq(authHeader), anyMap())) + .thenThrow(new RuntimeException("Failed to fetch LunMap")); + + assertThrows(CloudRuntimeException.class, + () -> unifiedSANStrategy.enableLogicalAccess(values)); + } + } + + @Test + void testDisableLogicalAccess_NullValues_ThrowsException() { + assertThrows(CloudRuntimeException.class, + () -> unifiedSANStrategy.disableLogicalAccess(null)); + } + + @Test + void testDisableLogicalAccess_MissingLunUuid_ThrowsException() { + Map values = new HashMap<>(); + values.put(OntapStorageConstants.IGROUP_DOT_UUID, "igroup-uuid-123"); + assertThrows(CloudRuntimeException.class, + () -> unifiedSANStrategy.disableLogicalAccess(values)); + } + + @Test + void testDisableLogicalAccess_MissingIgroupUuid_ThrowsException() { + Map values = new HashMap<>(); + 
values.put(OntapStorageConstants.LUN_DOT_UUID, "lun-uuid-123"); + assertThrows(CloudRuntimeException.class, + () -> unifiedSANStrategy.disableLogicalAccess(values)); + } + + @Test + void testDisableLogicalAccess_FeignExceptionNon404_ThrowsException() { + Map values = new HashMap<>(); + values.put(OntapStorageConstants.LUN_DOT_UUID, "lun-uuid-123"); + values.put(OntapStorageConstants.IGROUP_DOT_UUID, "igroup-uuid-123"); + + FeignException feignException = mock(FeignException.class); + when(feignException.status()).thenReturn(500); + when(feignException.getMessage()).thenReturn("Internal server error"); + + try (MockedStatic utilityMock = mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.generateAuthHeader("admin", "password")) + .thenReturn(authHeader); + + doThrow(feignException).when(sanFeignClient).deleteLunMap(eq(authHeader), eq("lun-uuid-123"), eq("igroup-uuid-123")); + + assertThrows(CloudRuntimeException.class, + () -> unifiedSANStrategy.disableLogicalAccess(values)); + } + } + + @Test + void testGetLogicalAccess_NullValues_ThrowsException() { + assertThrows(CloudRuntimeException.class, + () -> unifiedSANStrategy.getLogicalAccess(null)); + } + + @Test + void testGetLogicalAccess_MissingSvmName_ThrowsException() { + Map values = new HashMap<>(); + values.put(OntapStorageConstants.LUN_DOT_NAME, "/vol/vol1/lun1"); + values.put(OntapStorageConstants.IGROUP_DOT_NAME, "igroup1"); + assertThrows(CloudRuntimeException.class, + () -> unifiedSANStrategy.getLogicalAccess(values)); + } + + @Test + void testEnsureLunMapped_CreateNewMapping_Success() { + String svmName = "svm1"; + String lunName = "/vol/vol1/lun1"; + String accessGroupName = "igroup1"; + + LunMap lunMap = new LunMap(); + lunMap.setLogicalUnitNumber(4); + + OntapResponse emptyResponse = new OntapResponse<>(); + emptyResponse.setRecords(new ArrayList<>()); + + OntapResponse response = new OntapResponse<>(); + response.setRecords(List.of(lunMap)); + + try (MockedStatic 
utilityMock = mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.generateAuthHeader("admin", "password")) + .thenReturn(authHeader); + + // First call returns empty (no existing mapping) + when(sanFeignClient.getLunMapResponse(eq(authHeader), anyMap())) + .thenReturn(emptyResponse) // First call - check existing + .thenReturn(response); // Second call - after creation + + when(sanFeignClient.createLunMap(eq(authHeader), eq(true), any(LunMap.class))) + .thenReturn(new OntapResponse<>()); + + String result = unifiedSANStrategy.ensureLunMapped(svmName, lunName, accessGroupName); + + assertEquals("4", result); + verify(sanFeignClient).createLunMap(eq(authHeader), eq(true), any(LunMap.class)); + } + } + + @Test + void testEnsureLunMapped_FailedToMap_ThrowsException() { + String svmName = "svm1"; + String lunName = "/vol/vol1/lun1"; + String accessGroupName = "igroup1"; + + OntapResponse emptyResponse = new OntapResponse<>(); + emptyResponse.setRecords(new ArrayList<>()); + + try (MockedStatic utilityMock = mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.generateAuthHeader("admin", "password")) + .thenReturn(authHeader); + + when(sanFeignClient.getLunMapResponse(eq(authHeader), anyMap())) + .thenReturn(emptyResponse) // First call - no existing + .thenReturn(emptyResponse); // Second call - still empty after creation + + when(sanFeignClient.createLunMap(eq(authHeader), eq(true), any(LunMap.class))) + .thenReturn(new OntapResponse<>()); + + assertThrows(CloudRuntimeException.class, + () -> unifiedSANStrategy.ensureLunMapped(svmName, lunName, accessGroupName)); + } + } + + @Test + void testValidateInitiatorInAccessGroup_NullIgroupInitiators_ReturnsFalse() { + String hostInitiator = "iqn.1993-08.org.debian:01:host1"; + String svmName = "svm1"; + String accessGroupName = "igroup1"; + + Igroup igroup = new Igroup(); + igroup.setName(accessGroupName); + igroup.setInitiators(null); + + // Execute + boolean result 
= unifiedSANStrategy.validateInitiatorInAccessGroup(hostInitiator, svmName, igroup); + + // Verify + assertFalse(result); + } + + // ============= Additional Test Cases for Complete Coverage ============= + + @Test + void testCreateCloudStackVolume_NullLun_ThrowsException() { + CloudStackVolume request = new CloudStackVolume(); + request.setLun(null); + + assertThrows(CloudRuntimeException.class, + () -> unifiedSANStrategy.createCloudStackVolume(request)); + } + + @Test + void testCreateCloudStackVolume_EmptyResponse_ThrowsException() { + Lun lun = new Lun(); + lun.setName("/vol/vol1/lun1"); + CloudStackVolume request = new CloudStackVolume(); + request.setLun(lun); + + OntapResponse emptyResponse = new OntapResponse<>(); + emptyResponse.setRecords(new ArrayList<>()); + + try (MockedStatic utilityMock = mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.generateAuthHeader("admin", "password")) + .thenReturn(authHeader); + + when(sanFeignClient.createLun(eq(authHeader), eq(true), any(Lun.class))) + .thenReturn(emptyResponse); + + assertThrows(CloudRuntimeException.class, + () -> unifiedSANStrategy.createCloudStackVolume(request)); + } + } + + @Test + void testCreateCloudStackVolume_NullResponse_ThrowsException() { + Lun lun = new Lun(); + lun.setName("/vol/vol1/lun1"); + CloudStackVolume request = new CloudStackVolume(); + request.setLun(lun); + + try (MockedStatic utilityMock = mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.generateAuthHeader("admin", "password")) + .thenReturn(authHeader); + + when(sanFeignClient.createLun(eq(authHeader), eq(true), any(Lun.class))) + .thenReturn(null); + + assertThrows(CloudRuntimeException.class, + () -> unifiedSANStrategy.createCloudStackVolume(request)); + } + } + + @Test + void testDeleteCloudStackVolume_NullRequest_ThrowsException() { + assertThrows(CloudRuntimeException.class, + () -> unifiedSANStrategy.deleteCloudStackVolume(null)); + } + + @Test + void 
testDeleteCloudStackVolume_NullLun_ThrowsException() { + CloudStackVolume request = new CloudStackVolume(); + request.setLun(null); + + assertThrows(CloudRuntimeException.class, + () -> unifiedSANStrategy.deleteCloudStackVolume(request)); + } + + @Test + void testDeleteCloudStackVolume_FeignException_ThrowsCloudRuntimeException() { + Lun lun = new Lun(); + lun.setName("/vol/vol1/lun1"); + lun.setUuid("lun-uuid-123"); + CloudStackVolume request = new CloudStackVolume(); + request.setLun(lun); + + FeignException feignException = mock(FeignException.class); + when(feignException.status()).thenReturn(500); + when(feignException.getMessage()).thenReturn("Internal server error"); + + try (MockedStatic utilityMock = mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.generateAuthHeader("admin", "password")) + .thenReturn(authHeader); + + doThrow(feignException).when(sanFeignClient).deleteLun(eq(authHeader), eq("lun-uuid-123"), anyMap()); + + assertThrows(CloudRuntimeException.class, + () -> unifiedSANStrategy.deleteCloudStackVolume(request)); + } + } + + @Test + void testGetCloudStackVolume_FeignException404_ReturnsNull() { + Map values = new HashMap<>(); + values.put(OntapStorageConstants.SVM_DOT_NAME, "svm1"); + values.put(OntapStorageConstants.NAME, "/vol/vol1/lun1"); + + FeignException feignException = mock(FeignException.class); + when(feignException.status()).thenReturn(404); + + try (MockedStatic utilityMock = mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.generateAuthHeader("admin", "password")) + .thenReturn(authHeader); + + when(sanFeignClient.getLunResponse(eq(authHeader), anyMap())).thenThrow(feignException); + + CloudStackVolume result = unifiedSANStrategy.getCloudStackVolume(values); + + assertNull(result); + } + } + + @Test + void testGetCloudStackVolume_EmptyResponse_ReturnsNull() { + Map values = new HashMap<>(); + values.put(OntapStorageConstants.SVM_DOT_NAME, "svm1"); + 
values.put(OntapStorageConstants.NAME, "/vol/vol1/lun1"); + + OntapResponse emptyResponse = new OntapResponse<>(); + emptyResponse.setRecords(new ArrayList<>()); + + try (MockedStatic utilityMock = mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.generateAuthHeader("admin", "password")) + .thenReturn(authHeader); + + when(sanFeignClient.getLunResponse(eq(authHeader), anyMap())).thenReturn(emptyResponse); + + CloudStackVolume result = unifiedSANStrategy.getCloudStackVolume(values); + + assertNull(result); + } + } + + @Test + void testGetAccessGroup_FeignException404_ReturnsNull() { + Map values = new HashMap<>(); + values.put(OntapStorageConstants.SVM_DOT_NAME, "svm1"); + values.put(OntapStorageConstants.NAME, "igroup1"); + + FeignException feignException = mock(FeignException.class); + when(feignException.status()).thenReturn(404); + + try (MockedStatic utilityMock = mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.generateAuthHeader("admin", "password")) + .thenReturn(authHeader); + + when(sanFeignClient.getIgroupResponse(eq(authHeader), anyMap())).thenThrow(feignException); + + AccessGroup result = unifiedSANStrategy.getAccessGroup(values); + + assertNull(result); + } + } + + @Test + void testGetAccessGroup_EmptyResponse_ReturnsNull() { + Map values = new HashMap<>(); + values.put(OntapStorageConstants.SVM_DOT_NAME, "svm1"); + values.put(OntapStorageConstants.NAME, "igroup1"); + + OntapResponse emptyResponse = new OntapResponse<>(); + emptyResponse.setRecords(new ArrayList<>()); + + try (MockedStatic utilityMock = mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.generateAuthHeader("admin", "password")) + .thenReturn(authHeader); + + when(sanFeignClient.getIgroupResponse(eq(authHeader), anyMap())).thenReturn(emptyResponse); + + AccessGroup result = unifiedSANStrategy.getAccessGroup(values); + + assertNull(result); + } + } + + @Test + void 
testEnableLogicalAccess_EmptySvmName_ThrowsException() { + Map values = new HashMap<>(); + values.put(OntapStorageConstants.SVM_DOT_NAME, ""); + values.put(OntapStorageConstants.LUN_DOT_NAME, "/vol/vol1/lun1"); + values.put(OntapStorageConstants.IGROUP_DOT_NAME, "igroup1"); + + assertThrows(CloudRuntimeException.class, + () -> unifiedSANStrategy.enableLogicalAccess(values)); + } + + @Test + void testEnableLogicalAccess_EmptyLunName_ThrowsException() { + Map values = new HashMap<>(); + values.put(OntapStorageConstants.SVM_DOT_NAME, "svm1"); + values.put(OntapStorageConstants.LUN_DOT_NAME, ""); + values.put(OntapStorageConstants.IGROUP_DOT_NAME, "igroup1"); + + assertThrows(CloudRuntimeException.class, + () -> unifiedSANStrategy.enableLogicalAccess(values)); + } + + @Test + void testEnableLogicalAccess_EmptyIgroupName_ThrowsException() { + Map values = new HashMap<>(); + values.put(OntapStorageConstants.SVM_DOT_NAME, "svm1"); + values.put(OntapStorageConstants.LUN_DOT_NAME, "/vol/vol1/lun1"); + values.put(OntapStorageConstants.IGROUP_DOT_NAME, ""); + + assertThrows(CloudRuntimeException.class, + () -> unifiedSANStrategy.enableLogicalAccess(values)); + } + + @Test + void testDisableLogicalAccess_EmptyLunUuid_ThrowsException() { + Map values = new HashMap<>(); + values.put(OntapStorageConstants.LUN_DOT_UUID, ""); + values.put(OntapStorageConstants.IGROUP_DOT_UUID, "igroup-uuid-123"); + + assertThrows(CloudRuntimeException.class, + () -> unifiedSANStrategy.disableLogicalAccess(values)); + } + + @Test + void testDisableLogicalAccess_EmptyIgroupUuid_ThrowsException() { + Map values = new HashMap<>(); + values.put(OntapStorageConstants.LUN_DOT_UUID, "lun-uuid-123"); + values.put(OntapStorageConstants.IGROUP_DOT_UUID, ""); + + assertThrows(CloudRuntimeException.class, + () -> unifiedSANStrategy.disableLogicalAccess(values)); + } + + @Test + void testDisableLogicalAccess_FeignException404_SkipsDeletion() { + Map values = new HashMap<>(); + 
values.put(OntapStorageConstants.LUN_DOT_UUID, "lun-uuid-123"); + values.put(OntapStorageConstants.IGROUP_DOT_UUID, "igroup-uuid-123"); + + FeignException feignException = mock(FeignException.class); + when(feignException.status()).thenReturn(404); + + try (MockedStatic utilityMock = mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.generateAuthHeader("admin", "password")) + .thenReturn(authHeader); + + doThrow(feignException).when(sanFeignClient).deleteLunMap(eq(authHeader), eq("lun-uuid-123"), eq("igroup-uuid-123")); + + // Should not throw exception for 404 + assertDoesNotThrow(() -> unifiedSANStrategy.disableLogicalAccess(values)); + } + } + + @Test + void testGetLogicalAccess_MissingLunName_ThrowsException() { + Map values = new HashMap<>(); + values.put(OntapStorageConstants.SVM_DOT_NAME, "svm1"); + values.put(OntapStorageConstants.IGROUP_DOT_NAME, "igroup1"); + + assertThrows(CloudRuntimeException.class, + () -> unifiedSANStrategy.getLogicalAccess(values)); + } + + @Test + void testGetLogicalAccess_MissingIgroupName_ThrowsException() { + Map values = new HashMap<>(); + values.put(OntapStorageConstants.SVM_DOT_NAME, "svm1"); + values.put(OntapStorageConstants.LUN_DOT_NAME, "/vol/vol1/lun1"); + + assertThrows(CloudRuntimeException.class, + () -> unifiedSANStrategy.getLogicalAccess(values)); + } + + @Test + void testGetLogicalAccess_EmptyResponse_ReturnsNull() { + Map values = new HashMap<>(); + values.put(OntapStorageConstants.SVM_DOT_NAME, "svm1"); + values.put(OntapStorageConstants.LUN_DOT_NAME, "/vol/vol1/lun1"); + values.put(OntapStorageConstants.IGROUP_DOT_NAME, "igroup1"); + + OntapResponse emptyResponse = new OntapResponse<>(); + emptyResponse.setRecords(new ArrayList<>()); + + try (MockedStatic utilityMock = mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.generateAuthHeader("admin", "password")) + .thenReturn(authHeader); + + when(sanFeignClient.getLunMapResponse(eq(authHeader), 
anyMap())).thenReturn(emptyResponse); + + Map result = unifiedSANStrategy.getLogicalAccess(values); + + assertNull(result); + } + } + + @Test + void testGetLogicalAccess_ExceptionThrown_ReturnsNull() { + Map values = new HashMap<>(); + values.put(OntapStorageConstants.SVM_DOT_NAME, "svm1"); + values.put(OntapStorageConstants.LUN_DOT_NAME, "/vol/vol1/lun1"); + values.put(OntapStorageConstants.IGROUP_DOT_NAME, "igroup1"); + + try (MockedStatic utilityMock = mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.generateAuthHeader("admin", "password")) + .thenReturn(authHeader); + + when(sanFeignClient.getLunMapResponse(eq(authHeader), anyMap())) + .thenThrow(new RuntimeException("Connection failed")); + + Map result = unifiedSANStrategy.getLogicalAccess(values); + + assertNull(result); + } + } + + @Test + void testValidateInitiatorInAccessGroup_NullInitiator_ReturnsFalse() { + Igroup igroup = new Igroup(); + igroup.setName("igroup1"); + boolean result = unifiedSANStrategy.validateInitiatorInAccessGroup(null, "svm1", igroup); + assertFalse(result); + } + + @Test + void testValidateInitiatorInAccessGroup_AccessGroupNotFound_ReturnsFalse() { + String hostInitiator = "iqn.1993-08.org.debian:01:host1"; + String svmName = "svm1"; + + Igroup igroup = new Igroup(); + igroup.setName("igroup1"); + igroup.setInitiators(null); + + try (MockedStatic utilityMock = mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.generateAuthHeader("admin", "password")) + .thenReturn(authHeader); + + boolean result = unifiedSANStrategy.validateInitiatorInAccessGroup(hostInitiator, svmName, igroup); + + assertFalse(result); + } + } + + @Test + void testDeleteAccessGroup_FeignException404_SkipsDeletion() { + AccessGroup accessGroup = new AccessGroup(); + accessGroup.setStoragePoolId(1L); + List hosts = new ArrayList<>(); + HostVO host1 = mock(HostVO.class); + when(host1.getName()).thenReturn("host1"); + hosts.add(host1); + 
accessGroup.setHostsToConnect(hosts); + + FeignException feignException = mock(FeignException.class); + when(feignException.status()).thenReturn(404); + + try (MockedStatic utilityMock = mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.generateAuthHeader("admin", "password")) + .thenReturn(authHeader); + utilityMock.when(() -> OntapStorageUtils.getIgroupName("svm1", "host1")) + .thenReturn("igroup1"); + + when(sanFeignClient.getIgroupResponse(eq(authHeader), anyMap())).thenThrow(feignException); + + // Should not throw exception for 404 + assertDoesNotThrow(() -> unifiedSANStrategy.deleteAccessGroup(accessGroup)); + } + } + + @Test + void testDeleteAccessGroup_NotFoundInResponse_SkipsDeletion() { + AccessGroup accessGroup = new AccessGroup(); + accessGroup.setStoragePoolId(1L); + List hosts = new ArrayList<>(); + HostVO host1 = mock(HostVO.class); + when(host1.getName()).thenReturn("host1"); + hosts.add(host1); + accessGroup.setHostsToConnect(hosts); + + OntapResponse emptyResponse = new OntapResponse<>(); + emptyResponse.setRecords(new ArrayList<>()); + + try (MockedStatic utilityMock = mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.generateAuthHeader("admin", "password")) + .thenReturn(authHeader); + utilityMock.when(() -> OntapStorageUtils.getIgroupName("svm1", "host1")) + .thenReturn("igroup1"); + + when(sanFeignClient.getIgroupResponse(eq(authHeader), anyMap())).thenReturn(emptyResponse); + + // Should not throw exception when not found + assertDoesNotThrow(() -> unifiedSANStrategy.deleteAccessGroup(accessGroup)); + } + } + + @Test + void testEnsureLunMapped_ExistingMapping_ReturnsExistingNumber() { + // Setup + String svmName = "svm1"; + String lunName = "/vol/vol1/lun1"; + String accessGroupName = "igroup1"; + + LunMap lunMap = new LunMap(); + lunMap.setLogicalUnitNumber(3); + + OntapResponse response = new OntapResponse<>(); + response.setRecords(List.of(lunMap)); + + try (MockedStatic 
utilityMock = mockStatic(OntapStorageUtils.class)) { + utilityMock.when(() -> OntapStorageUtils.generateAuthHeader("admin", "password")) + .thenReturn(authHeader); + + when(sanFeignClient.getLunMapResponse(eq(authHeader), anyMap())).thenReturn(response); + + // Execute + String result = unifiedSANStrategy.ensureLunMapped(svmName, lunName, accessGroupName); + + // Verify + assertEquals("3", result); + // Verify createLunMap was NOT called + verify(sanFeignClient, never()).createLunMap(any(), anyBoolean(), any(LunMap.class)); + } + } +} diff --git a/plugins/storage/volume/ontap/src/test/java/org/apache/cloudstack/storage/vmsnapshot/OntapVMSnapshotStrategyTest.java b/plugins/storage/volume/ontap/src/test/java/org/apache/cloudstack/storage/vmsnapshot/OntapVMSnapshotStrategyTest.java new file mode 100644 index 000000000000..2fa9e77a20cd --- /dev/null +++ b/plugins/storage/volume/ontap/src/test/java/org/apache/cloudstack/storage/vmsnapshot/OntapVMSnapshotStrategyTest.java @@ -0,0 +1,933 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.storage.vmsnapshot; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.lenient; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.lang.reflect.Field; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority; +import org.apache.cloudstack.engine.subsystem.api.storage.VMSnapshotOptions; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.to.VolumeObjectTO; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.Spy; +import org.mockito.junit.jupiter.MockitoExtension; +import org.mockito.junit.jupiter.MockitoSettings; +import org.mockito.quality.Strictness; +import org.apache.cloudstack.storage.utils.OntapStorageConstants; +import com.cloud.agent.AgentManager; +import com.cloud.agent.api.FreezeThawVMAnswer; +import com.cloud.agent.api.FreezeThawVMCommand; +import com.cloud.agent.api.VMSnapshotTO; +import com.cloud.exception.AgentUnavailableException; +import 
com.cloud.exception.OperationTimedoutException; +import com.cloud.hypervisor.Hypervisor; +import com.cloud.storage.GuestOSVO; +import com.cloud.storage.VolumeDetailVO; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.GuestOSDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.storage.dao.VolumeDetailsDao; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.fsm.NoTransitionException; +import com.cloud.vm.UserVmVO; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.dao.UserVmDao; +import com.cloud.vm.snapshot.VMSnapshot; +import com.cloud.vm.snapshot.VMSnapshotDetailsVO; +import com.cloud.vm.snapshot.VMSnapshotVO; +import com.cloud.vm.snapshot.dao.VMSnapshotDao; +import com.cloud.vm.snapshot.dao.VMSnapshotDetailsDao; + +/** + * Unit tests for {@link OntapVMSnapshotStrategy}. + * + *

Tests cover: + *

    + *
  • canHandle(VMSnapshot) — various conditions for Allocated and non-Allocated states
  • + *
  • canHandle(Long vmId, Long rootPoolId, boolean snapshotMemory) — allocation-phase checks
  • + *
  • takeVMSnapshot — state transition failure scenarios
  • + *
  • Freeze/thaw behavior (freeze success/failure, thaw success/failure, agent errors)
  • + *
  • Quiesce behavior (honors user input; freeze/thaw only when quiesce=true)
  • + *
+ */ +@ExtendWith(MockitoExtension.class) +@MockitoSettings(strictness = Strictness.LENIENT) +class OntapVMSnapshotStrategyTest { + + private static final long VM_ID = 100L; + private static final long HOST_ID = 10L; + private static final long SNAPSHOT_ID = 200L; + private static final long VOLUME_ID_1 = 301L; + private static final long VOLUME_ID_2 = 302L; + private static final long POOL_ID_1 = 401L; + private static final long POOL_ID_2 = 402L; + private static final long GUEST_OS_ID = 50L; + private static final String VM_INSTANCE_NAME = "i-2-100-VM"; + private static final String VM_UUID = "vm-uuid-123"; + + @Spy + private OntapVMSnapshotStrategy strategy; + + @Mock + private UserVmDao userVmDao; + @Mock + private VolumeDao volumeDao; + @Mock + private PrimaryDataStoreDao storagePool; + @Mock + private StoragePoolDetailsDao storagePoolDetailsDao; + @Mock + private VMSnapshotDetailsDao vmSnapshotDetailsDao; + @Mock + private VMSnapshotHelper vmSnapshotHelper; + @Mock + private VMSnapshotDao vmSnapshotDao; + @Mock + private AgentManager agentMgr; + @Mock + private GuestOSDao guestOSDao; + @Mock + private VolumeDataFactory volumeDataFactory; + @Mock + private VolumeDetailsDao volumeDetailsDao; + + @BeforeEach + void setUp() throws Exception { + // Inject mocks into the inherited fields via reflection + // DefaultVMSnapshotStrategy fields + setField(strategy, DefaultVMSnapshotStrategy.class, "vmSnapshotHelper", vmSnapshotHelper); + setField(strategy, DefaultVMSnapshotStrategy.class, "guestOSDao", guestOSDao); + setField(strategy, DefaultVMSnapshotStrategy.class, "userVmDao", userVmDao); + setField(strategy, DefaultVMSnapshotStrategy.class, "vmSnapshotDao", vmSnapshotDao); + setField(strategy, DefaultVMSnapshotStrategy.class, "agentMgr", agentMgr); + setField(strategy, DefaultVMSnapshotStrategy.class, "volumeDao", volumeDao); + + // StorageVMSnapshotStrategy fields + setField(strategy, StorageVMSnapshotStrategy.class, "storagePool", storagePool); + 
setField(strategy, StorageVMSnapshotStrategy.class, "vmSnapshotDetailsDao", vmSnapshotDetailsDao); + setField(strategy, StorageVMSnapshotStrategy.class, "volumeDataFactory", volumeDataFactory); + + // OntapVMSnapshotStrategy fields + setField(strategy, OntapVMSnapshotStrategy.class, "storagePoolDetailsDao", storagePoolDetailsDao); + setField(strategy, OntapVMSnapshotStrategy.class, "volumeDetailsDao", volumeDetailsDao); + } + + // ────────────────────────────────────────────────────────────────────────── + // Helper: inject field via reflection into a specific declaring class + // ────────────────────────────────────────────────────────────────────────── + + private void setField(Object target, Class declaringClass, String fieldName, Object value) throws Exception { + Field field = declaringClass.getDeclaredField(fieldName); + field.setAccessible(true); + field.set(target, value); + } + + // ────────────────────────────────────────────────────────────────────────── + // Helper: create common mocks + // ────────────────────────────────────────────────────────────────────────── + + private UserVmVO createMockUserVm(Hypervisor.HypervisorType hypervisorType, VirtualMachine.State state) { + UserVmVO userVm = mock(UserVmVO.class); + when(userVm.getHypervisorType()).thenReturn(hypervisorType); + when(userVm.getState()).thenReturn(state); + return userVm; + } + + private VolumeVO createMockVolume(long volumeId, long poolId) { + VolumeVO volume = mock(VolumeVO.class); + when(volume.getId()).thenReturn(volumeId); + when(volume.getPoolId()).thenReturn(poolId); + return volume; + } + + private StoragePoolVO createOntapManagedPool(long poolId) { + StoragePoolVO pool = mock(StoragePoolVO.class); + when(pool.isManaged()).thenReturn(true); + when(pool.getStorageProviderName()).thenReturn(OntapStorageConstants.ONTAP_PLUGIN_NAME); + return pool; + } + + private VMSnapshotVO createMockVmSnapshot(VMSnapshot.State state, VMSnapshot.Type type) { + VMSnapshotVO vmSnapshot = 
mock(VMSnapshotVO.class); + when(vmSnapshot.getId()).thenReturn(SNAPSHOT_ID); + when(vmSnapshot.getVmId()).thenReturn(VM_ID); + when(vmSnapshot.getState()).thenReturn(state); + lenient().when(vmSnapshot.getType()).thenReturn(type); + return vmSnapshot; + } + + private void setupAllVolumesOnOntap() { + UserVmVO userVm = createMockUserVm(Hypervisor.HypervisorType.KVM, VirtualMachine.State.Running); + when(userVmDao.findById(VM_ID)).thenReturn(userVm); + + VolumeVO vol1 = createMockVolume(VOLUME_ID_1, POOL_ID_1); + VolumeVO vol2 = createMockVolume(VOLUME_ID_2, POOL_ID_2); + when(volumeDao.findByInstance(VM_ID)).thenReturn(Arrays.asList(vol1, vol2)); + + StoragePoolVO pool1 = createOntapManagedPool(POOL_ID_1); + StoragePoolVO pool2 = createOntapManagedPool(POOL_ID_2); + when(storagePool.findById(POOL_ID_1)).thenReturn(pool1); + when(storagePool.findById(POOL_ID_2)).thenReturn(pool2); + } + + // ══════════════════════════════════════════════════════════════════════════ + // Tests: canHandle(VMSnapshot) + // ══════════════════════════════════════════════════════════════════════════ + + @Test + void testCanHandle_AllocatedDiskType_AllVolumesOnOntap_ReturnsHighest() { + setupAllVolumesOnOntap(); + VMSnapshotVO vmSnapshot = createMockVmSnapshot(VMSnapshot.State.Allocated, VMSnapshot.Type.Disk); + + StrategyPriority result = strategy.canHandle(vmSnapshot); + + assertEquals(StrategyPriority.HIGHEST, result); + } + + @Test + void testCanHandle_AllocatedDiskAndMemoryType_ReturnsCantHandle() { + VMSnapshotVO vmSnapshot = createMockVmSnapshot(VMSnapshot.State.Allocated, VMSnapshot.Type.DiskAndMemory); + when(vmSnapshot.getVmId()).thenReturn(VM_ID); + + StrategyPriority result = strategy.canHandle(vmSnapshot); + + assertEquals(StrategyPriority.CANT_HANDLE, result); + } + + @Test + void testCanHandle_AllocatedDiskType_VmNotFound_ReturnsCantHandle() { + when(userVmDao.findById(VM_ID)).thenReturn(null); + VMSnapshotVO vmSnapshot = createMockVmSnapshot(VMSnapshot.State.Allocated, 
VMSnapshot.Type.Disk); + + StrategyPriority result = strategy.canHandle(vmSnapshot); + + assertEquals(StrategyPriority.CANT_HANDLE, result); + } + + @Test + void testCanHandle_AllocatedDiskType_VmxenHypervisor_ReturnsCantHandle() { + UserVmVO userVm = createMockUserVm(Hypervisor.HypervisorType.XenServer, VirtualMachine.State.Running); + when(userVmDao.findById(VM_ID)).thenReturn(userVm); + VMSnapshotVO vmSnapshot = createMockVmSnapshot(VMSnapshot.State.Allocated, VMSnapshot.Type.Disk); + + StrategyPriority result = strategy.canHandle(vmSnapshot); + + assertEquals(StrategyPriority.CANT_HANDLE, result); + } + + @Test + void testCanHandle_AllocatedDiskType_VmNotRunning_ReturnsCantHandle() { + UserVmVO userVm = createMockUserVm(Hypervisor.HypervisorType.KVM, VirtualMachine.State.Stopped); + when(userVmDao.findById(VM_ID)).thenReturn(userVm); + VMSnapshotVO vmSnapshot = createMockVmSnapshot(VMSnapshot.State.Allocated, VMSnapshot.Type.Disk); + + StrategyPriority result = strategy.canHandle(vmSnapshot); + + assertEquals(StrategyPriority.CANT_HANDLE, result); + } + + @Test + void testCanHandle_AllocatedDiskType_NoVolumes_ReturnsCantHandle() { + UserVmVO userVm = createMockUserVm(Hypervisor.HypervisorType.KVM, VirtualMachine.State.Running); + when(userVmDao.findById(VM_ID)).thenReturn(userVm); + when(volumeDao.findByInstance(VM_ID)).thenReturn(Collections.emptyList()); + VMSnapshotVO vmSnapshot = createMockVmSnapshot(VMSnapshot.State.Allocated, VMSnapshot.Type.Disk); + + StrategyPriority result = strategy.canHandle(vmSnapshot); + + assertEquals(StrategyPriority.CANT_HANDLE, result); + } + + @Test + void testCanHandle_AllocatedDiskType_VolumeOnNonManagedPool_ReturnsCantHandle() { + UserVmVO userVm = createMockUserVm(Hypervisor.HypervisorType.KVM, VirtualMachine.State.Running); + when(userVmDao.findById(VM_ID)).thenReturn(userVm); + + VolumeVO vol = createMockVolume(VOLUME_ID_1, POOL_ID_1); + when(volumeDao.findByInstance(VM_ID)).thenReturn(Collections.singletonList(vol)); + 
+ StoragePoolVO pool = mock(StoragePoolVO.class); + when(pool.isManaged()).thenReturn(false); + when(pool.getName()).thenReturn("non-managed-pool"); + when(storagePool.findById(POOL_ID_1)).thenReturn(pool); + + VMSnapshotVO vmSnapshot = createMockVmSnapshot(VMSnapshot.State.Allocated, VMSnapshot.Type.Disk); + + StrategyPriority result = strategy.canHandle(vmSnapshot); + + assertEquals(StrategyPriority.CANT_HANDLE, result); + } + + @Test + void testCanHandle_AllocatedDiskType_VolumeOnNonOntapManagedPool_ReturnsCantHandle() { + UserVmVO userVm = createMockUserVm(Hypervisor.HypervisorType.KVM, VirtualMachine.State.Running); + when(userVmDao.findById(VM_ID)).thenReturn(userVm); + + VolumeVO vol = createMockVolume(VOLUME_ID_1, POOL_ID_1); + when(volumeDao.findByInstance(VM_ID)).thenReturn(Collections.singletonList(vol)); + + StoragePoolVO pool = mock(StoragePoolVO.class); + when(pool.isManaged()).thenReturn(true); + when(pool.getStorageProviderName()).thenReturn("SolidFire"); + when(pool.getName()).thenReturn("solidfire-pool"); + when(storagePool.findById(POOL_ID_1)).thenReturn(pool); + + VMSnapshotVO vmSnapshot = createMockVmSnapshot(VMSnapshot.State.Allocated, VMSnapshot.Type.Disk); + + StrategyPriority result = strategy.canHandle(vmSnapshot); + + assertEquals(StrategyPriority.CANT_HANDLE, result); + } + + @Test + void testCanHandle_AllocatedDiskType_VolumeWithNullPoolId_ReturnsCantHandle() { + UserVmVO userVm = createMockUserVm(Hypervisor.HypervisorType.KVM, VirtualMachine.State.Running); + when(userVmDao.findById(VM_ID)).thenReturn(userVm); + + VolumeVO vol = mock(VolumeVO.class); + when(vol.getPoolId()).thenReturn(null); + when(volumeDao.findByInstance(VM_ID)).thenReturn(Collections.singletonList(vol)); + + VMSnapshotVO vmSnapshot = createMockVmSnapshot(VMSnapshot.State.Allocated, VMSnapshot.Type.Disk); + + StrategyPriority result = strategy.canHandle(vmSnapshot); + + assertEquals(StrategyPriority.CANT_HANDLE, result); + } + + @Test + void 
testCanHandle_AllocatedDiskType_PoolNotFound_ReturnsCantHandle() { + UserVmVO userVm = createMockUserVm(Hypervisor.HypervisorType.KVM, VirtualMachine.State.Running); + when(userVmDao.findById(VM_ID)).thenReturn(userVm); + + VolumeVO vol = createMockVolume(VOLUME_ID_1, POOL_ID_1); + when(volumeDao.findByInstance(VM_ID)).thenReturn(Collections.singletonList(vol)); + when(storagePool.findById(POOL_ID_1)).thenReturn(null); + + VMSnapshotVO vmSnapshot = createMockVmSnapshot(VMSnapshot.State.Allocated, VMSnapshot.Type.Disk); + + StrategyPriority result = strategy.canHandle(vmSnapshot); + + assertEquals(StrategyPriority.CANT_HANDLE, result); + } + + @Test + void testCanHandle_NonAllocated_HasFlexVolSnapshotDetails_AllOnOntap_ReturnsHighest() { + setupAllVolumesOnOntap(); + VMSnapshotVO vmSnapshot = createMockVmSnapshot(VMSnapshot.State.Ready, VMSnapshot.Type.Disk); + + List details = new ArrayList<>(); + details.add(new VMSnapshotDetailsVO(SNAPSHOT_ID, OntapStorageConstants.ONTAP_FLEXVOL_SNAPSHOT, + "flex-uuid::snap-uuid::vmsnap_200_123::401", true)); + when(vmSnapshotDetailsDao.findDetails(SNAPSHOT_ID, OntapStorageConstants.ONTAP_FLEXVOL_SNAPSHOT)).thenReturn(details); + + StrategyPriority result = strategy.canHandle(vmSnapshot); + + assertEquals(StrategyPriority.HIGHEST, result); + } + + @Test + void testCanHandle_NonAllocated_HasLegacyStorageSnapshotDetails_AllOnOntap_ReturnsHighest() { + setupAllVolumesOnOntap(); + VMSnapshotVO vmSnapshot = createMockVmSnapshot(VMSnapshot.State.Ready, VMSnapshot.Type.Disk); + + // No FlexVol details + when(vmSnapshotDetailsDao.findDetails(SNAPSHOT_ID, OntapStorageConstants.ONTAP_FLEXVOL_SNAPSHOT)).thenReturn(Collections.emptyList()); + // Has legacy details + List details = new ArrayList<>(); + details.add(new VMSnapshotDetailsVO(SNAPSHOT_ID, "kvmStorageSnapshot", "123", true)); + when(vmSnapshotDetailsDao.findDetails(SNAPSHOT_ID, "kvmStorageSnapshot")).thenReturn(details); + + StrategyPriority result = strategy.canHandle(vmSnapshot); 
+ + assertEquals(StrategyPriority.HIGHEST, result); + } + + @Test + void testCanHandle_NonAllocated_NoDetails_ReturnsCantHandle() { + VMSnapshotVO vmSnapshot = createMockVmSnapshot(VMSnapshot.State.Ready, VMSnapshot.Type.Disk); + when(vmSnapshotDetailsDao.findDetails(SNAPSHOT_ID, OntapStorageConstants.ONTAP_FLEXVOL_SNAPSHOT)).thenReturn(Collections.emptyList()); + when(vmSnapshotDetailsDao.findDetails(SNAPSHOT_ID, "kvmStorageSnapshot")).thenReturn(Collections.emptyList()); + + StrategyPriority result = strategy.canHandle(vmSnapshot); + + assertEquals(StrategyPriority.CANT_HANDLE, result); + } + + @Test + void testCanHandle_NonAllocated_HasFlexVolDetails_NotOnOntap_ReturnsCantHandle() { + // VM has FlexVol details but volumes are now on non-ONTAP storage + UserVmVO userVm = createMockUserVm(Hypervisor.HypervisorType.KVM, VirtualMachine.State.Running); + when(userVmDao.findById(VM_ID)).thenReturn(userVm); + + VolumeVO vol = createMockVolume(VOLUME_ID_1, POOL_ID_1); + when(volumeDao.findByInstance(VM_ID)).thenReturn(Collections.singletonList(vol)); + + StoragePoolVO pool = mock(StoragePoolVO.class); + when(pool.isManaged()).thenReturn(false); + when(pool.getName()).thenReturn("other-pool"); + when(storagePool.findById(POOL_ID_1)).thenReturn(pool); + + VMSnapshotVO vmSnapshot = createMockVmSnapshot(VMSnapshot.State.Ready, VMSnapshot.Type.Disk); + List flexVolDetails = new ArrayList<>(); + flexVolDetails.add(new VMSnapshotDetailsVO(SNAPSHOT_ID, OntapStorageConstants.ONTAP_FLEXVOL_SNAPSHOT, + "flex-uuid::snap-uuid::vmsnap_200_123::401", true)); + when(vmSnapshotDetailsDao.findDetails(SNAPSHOT_ID, OntapStorageConstants.ONTAP_FLEXVOL_SNAPSHOT)).thenReturn(flexVolDetails); + + StrategyPriority result = strategy.canHandle(vmSnapshot); + + assertEquals(StrategyPriority.CANT_HANDLE, result); + } + + @Test + void testCanHandle_MixedPools_OneOntapOneNot_ReturnsCantHandle() { + UserVmVO userVm = createMockUserVm(Hypervisor.HypervisorType.KVM, VirtualMachine.State.Running); + 
when(userVmDao.findById(VM_ID)).thenReturn(userVm); + + VolumeVO vol1 = createMockVolume(VOLUME_ID_1, POOL_ID_1); + VolumeVO vol2 = createMockVolume(VOLUME_ID_2, POOL_ID_2); + when(volumeDao.findByInstance(VM_ID)).thenReturn(Arrays.asList(vol1, vol2)); + + StoragePoolVO ontapPool = createOntapManagedPool(POOL_ID_1); + StoragePoolVO otherPool = mock(StoragePoolVO.class); + when(otherPool.isManaged()).thenReturn(true); + when(otherPool.getStorageProviderName()).thenReturn("SolidFire"); + when(otherPool.getName()).thenReturn("sf-pool"); + when(storagePool.findById(POOL_ID_1)).thenReturn(ontapPool); + when(storagePool.findById(POOL_ID_2)).thenReturn(otherPool); + + VMSnapshotVO vmSnapshot = createMockVmSnapshot(VMSnapshot.State.Allocated, VMSnapshot.Type.Disk); + + StrategyPriority result = strategy.canHandle(vmSnapshot); + + assertEquals(StrategyPriority.CANT_HANDLE, result); + } + + // ══════════════════════════════════════════════════════════════════════════ + // Tests: canHandle(Long vmId, Long rootPoolId, boolean snapshotMemory) + // ══════════════════════════════════════════════════════════════════════════ + + @Test + void testCanHandleByVmId_MemorySnapshot_ReturnsCantHandle() { + StrategyPriority result = strategy.canHandle(VM_ID, POOL_ID_1, true); + + assertEquals(StrategyPriority.CANT_HANDLE, result); + } + + @Test + void testCanHandleByVmId_DiskOnly_AllOnOntap_ReturnsHighest() { + setupAllVolumesOnOntap(); + + StrategyPriority result = strategy.canHandle(VM_ID, POOL_ID_1, false); + + assertEquals(StrategyPriority.HIGHEST, result); + } + + @Test + void testCanHandleByVmId_DiskOnly_NotOnOntap_ReturnsCantHandle() { + when(userVmDao.findById(VM_ID)).thenReturn(null); + + StrategyPriority result = strategy.canHandle(VM_ID, POOL_ID_1, false); + + assertEquals(StrategyPriority.CANT_HANDLE, result); + } + + // ══════════════════════════════════════════════════════════════════════════ + // Tests: groupVolumesByFlexVol + // 
══════════════════════════════════════════════════════════════════════════ + + @Test + void testGroupVolumesByFlexVol_SingleFlexVol_TwoVolumes() { + VolumeObjectTO volumeTO1 = mock(VolumeObjectTO.class); + when(volumeTO1.getId()).thenReturn(VOLUME_ID_1); + VolumeObjectTO volumeTO2 = mock(VolumeObjectTO.class); + when(volumeTO2.getId()).thenReturn(VOLUME_ID_2); + + VolumeVO vol1 = mock(VolumeVO.class); + when(vol1.getId()).thenReturn(VOLUME_ID_1); + when(vol1.getPoolId()).thenReturn(POOL_ID_1); + VolumeVO vol2 = mock(VolumeVO.class); + when(vol2.getId()).thenReturn(VOLUME_ID_2); + when(vol2.getPoolId()).thenReturn(POOL_ID_1); // same pool → same FlexVol + when(volumeDao.findById(VOLUME_ID_1)).thenReturn(vol1); + when(volumeDao.findById(VOLUME_ID_2)).thenReturn(vol2); + + Map poolDetails = new HashMap<>(); + poolDetails.put(OntapStorageConstants.VOLUME_UUID, "flexvol-uuid-1"); + when(storagePoolDetailsDao.listDetailsKeyPairs(POOL_ID_1)).thenReturn(poolDetails); + + Map groups = + strategy.groupVolumesByFlexVol(Arrays.asList(volumeTO1, volumeTO2)); + + assertEquals(1, groups.size()); + assertEquals(2, groups.get("flexvol-uuid-1").volumeIds.size()); + } + + @Test + void testGroupVolumesByFlexVol_TwoFlexVols() { + VolumeObjectTO volumeTO1 = mock(VolumeObjectTO.class); + when(volumeTO1.getId()).thenReturn(VOLUME_ID_1); + VolumeObjectTO volumeTO2 = mock(VolumeObjectTO.class); + when(volumeTO2.getId()).thenReturn(VOLUME_ID_2); + + VolumeVO vol1 = mock(VolumeVO.class); + when(vol1.getId()).thenReturn(VOLUME_ID_1); + when(vol1.getPoolId()).thenReturn(POOL_ID_1); + VolumeVO vol2 = mock(VolumeVO.class); + when(vol2.getId()).thenReturn(VOLUME_ID_2); + when(vol2.getPoolId()).thenReturn(POOL_ID_2); // different pool → different FlexVol + when(volumeDao.findById(VOLUME_ID_1)).thenReturn(vol1); + when(volumeDao.findById(VOLUME_ID_2)).thenReturn(vol2); + + Map poolDetails1 = new HashMap<>(); + poolDetails1.put(OntapStorageConstants.VOLUME_UUID, "flexvol-uuid-1"); + Map poolDetails2 
= new HashMap<>(); + poolDetails2.put(OntapStorageConstants.VOLUME_UUID, "flexvol-uuid-2"); + when(storagePoolDetailsDao.listDetailsKeyPairs(POOL_ID_1)).thenReturn(poolDetails1); + when(storagePoolDetailsDao.listDetailsKeyPairs(POOL_ID_2)).thenReturn(poolDetails2); + + Map groups = + strategy.groupVolumesByFlexVol(Arrays.asList(volumeTO1, volumeTO2)); + + assertEquals(2, groups.size()); + assertEquals(1, groups.get("flexvol-uuid-1").volumeIds.size()); + assertEquals(1, groups.get("flexvol-uuid-2").volumeIds.size()); + } + + @Test + void testGroupVolumesByFlexVol_MissingFlexVolUuid_ThrowsException() { + VolumeObjectTO volumeTO1 = mock(VolumeObjectTO.class); + when(volumeTO1.getId()).thenReturn(VOLUME_ID_1); + + VolumeVO vol1 = mock(VolumeVO.class); + when(vol1.getId()).thenReturn(VOLUME_ID_1); + when(vol1.getPoolId()).thenReturn(POOL_ID_1); + when(volumeDao.findById(VOLUME_ID_1)).thenReturn(vol1); + + Map poolDetails = new HashMap<>(); + // No VOLUME_UUID key + when(storagePoolDetailsDao.listDetailsKeyPairs(POOL_ID_1)).thenReturn(poolDetails); + + assertThrows(CloudRuntimeException.class, + () -> strategy.groupVolumesByFlexVol(Collections.singletonList(volumeTO1))); + } + + @Test + void testGroupVolumesByFlexVol_VolumeNotFound_ThrowsException() { + VolumeObjectTO volumeTO1 = mock(VolumeObjectTO.class); + when(volumeTO1.getId()).thenReturn(VOLUME_ID_1); + when(volumeDao.findById(VOLUME_ID_1)).thenReturn(null); + + assertThrows(CloudRuntimeException.class, + () -> strategy.groupVolumesByFlexVol(Collections.singletonList(volumeTO1))); + } + + // ══════════════════════════════════════════════════════════════════════════ + // Tests: FlexVolSnapshotDetail parse/toString + // ══════════════════════════════════════════════════════════════════════════ + + @Test + void testFlexVolSnapshotDetail_ParseAndToString_NewFormat() { + String value = "flexvol-uuid-1::snap-uuid-1::vmsnap_200_1234567890::root-disk.qcow2::401::NFS3"; + OntapVMSnapshotStrategy.FlexVolSnapshotDetail detail 
= + OntapVMSnapshotStrategy.FlexVolSnapshotDetail.parse(value); + + assertEquals("flexvol-uuid-1", detail.flexVolUuid); + assertEquals("snap-uuid-1", detail.snapshotUuid); + assertEquals("vmsnap_200_1234567890", detail.snapshotName); + assertEquals("root-disk.qcow2", detail.volumePath); + assertEquals(401L, detail.poolId); + assertEquals("NFS3", detail.protocol); + assertEquals(value, detail.toString()); + } + + @Test + void testFlexVolSnapshotDetail_ParseLegacy4FieldFormat() { + // Legacy format without volumePath and protocol + String value = "flexvol-uuid-1::snap-uuid-1::vmsnap_200_1234567890::401"; + OntapVMSnapshotStrategy.FlexVolSnapshotDetail detail = + OntapVMSnapshotStrategy.FlexVolSnapshotDetail.parse(value); + + assertEquals("flexvol-uuid-1", detail.flexVolUuid); + assertEquals("snap-uuid-1", detail.snapshotUuid); + assertEquals("vmsnap_200_1234567890", detail.snapshotName); + assertEquals(null, detail.volumePath); + assertEquals(401L, detail.poolId); + assertEquals(null, detail.protocol); + } + + @Test + void testFlexVolSnapshotDetail_ParseInvalidFormat_ThrowsException() { + assertThrows(CloudRuntimeException.class, + () -> OntapVMSnapshotStrategy.FlexVolSnapshotDetail.parse("invalid-format")); + } + + @Test + void testFlexVolSnapshotDetail_ParseTooFewParts_ThrowsException() { + assertThrows(CloudRuntimeException.class, + () -> OntapVMSnapshotStrategy.FlexVolSnapshotDetail.parse("a::b::c")); + } + + @Test + void testFlexVolSnapshotDetail_Parse5Parts_ThrowsException() { + // 5 parts is neither legacy (4) nor current (6) format + assertThrows(CloudRuntimeException.class, + () -> OntapVMSnapshotStrategy.FlexVolSnapshotDetail.parse("a::b::c::d::e")); + } + + // ══════════════════════════════════════════════════════════════════════════ + // Tests: buildSnapshotName + // ══════════════════════════════════════════════════════════════════════════ + + @Test + void testBuildSnapshotName_Format() { + VMSnapshotVO vmSnapshot = mock(VMSnapshotVO.class); + 
when(vmSnapshot.getId()).thenReturn(SNAPSHOT_ID); + + String name = strategy.buildSnapshotName(vmSnapshot); + + assertEquals(true, name.startsWith("vmsnap_200_")); + assertEquals(true, name.length() <= OntapStorageConstants.MAX_SNAPSHOT_NAME_LENGTH); + } + + // ══════════════════════════════════════════════════════════════════════════ + // Tests: resolveVolumePathOnOntap + // ══════════════════════════════════════════════════════════════════════════ + + @Test + void testResolveVolumePathOnOntap_NFS_ReturnsVolumePath() { + VolumeVO vol = mock(VolumeVO.class); + when(vol.getPath()).thenReturn("abc123-def456.qcow2"); + when(volumeDao.findById(VOLUME_ID_1)).thenReturn(vol); + + String path = strategy.resolveVolumePathOnOntap(VOLUME_ID_1, "NFS3", new HashMap<>()); + + assertEquals("abc123-def456.qcow2", path); + } + + @Test + void testResolveVolumePathOnOntap_ISCSI_ReturnsLunName() { + VolumeDetailVO lunDetail = mock(VolumeDetailVO.class); + when(lunDetail.getValue()).thenReturn("/vol/vol1/lun_301"); + when(volumeDetailsDao.findDetail(VOLUME_ID_1, OntapStorageConstants.LUN_DOT_NAME)).thenReturn(lunDetail); + + String path = strategy.resolveVolumePathOnOntap(VOLUME_ID_1, "ISCSI", new HashMap<>()); + + assertEquals("/vol/vol1/lun_301", path); + } + + @Test + void testResolveVolumePathOnOntap_ISCSI_NoLunDetail_ThrowsException() { + when(volumeDetailsDao.findDetail(VOLUME_ID_1, OntapStorageConstants.LUN_DOT_NAME)).thenReturn(null); + + assertThrows(CloudRuntimeException.class, + () -> strategy.resolveVolumePathOnOntap(VOLUME_ID_1, "ISCSI", new HashMap<>())); + } + + @Test + void testResolveVolumePathOnOntap_NFS_VolumeNotFound_ThrowsException() { + when(volumeDao.findById(VOLUME_ID_1)).thenReturn(null); + + assertThrows(CloudRuntimeException.class, + () -> strategy.resolveVolumePathOnOntap(VOLUME_ID_1, "NFS3", new HashMap<>())); + } + + // ══════════════════════════════════════════════════════════════════════════ + // Tests: takeVMSnapshot — State transitions & Freeze/Thaw + 
// ══════════════════════════════════════════════════════════════════════════ + + @Test + void testTakeVMSnapshot_StateTransitionFails_ThrowsCloudRuntimeException() throws Exception { + VMSnapshotVO vmSnapshot = createTakeSnapshotVmSnapshot(); + when(vmSnapshotHelper.pickRunningHost(VM_ID)).thenReturn(HOST_ID); + UserVmVO userVm = mock(UserVmVO.class); + when(userVmDao.findById(VM_ID)).thenReturn(userVm); + + // State transition fails + doThrow(new NoTransitionException("Cannot transition")).when(vmSnapshotHelper) + .vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.CreateRequested); + + assertThrows(CloudRuntimeException.class, () -> strategy.takeVMSnapshot(vmSnapshot)); + } + + @Test + void testTakeVMSnapshot_FreezeFailure_ThrowsException() throws Exception { + VMSnapshotVO vmSnapshot = createTakeSnapshotVmSnapshot(); + setupTakeSnapshotCommon(vmSnapshot); + setupSingleVolumeForTakeSnapshot(); + + // Freeze failure + FreezeThawVMAnswer freezeAnswer = mock(FreezeThawVMAnswer.class); + when(freezeAnswer.getResult()).thenReturn(false); + when(freezeAnswer.getDetails()).thenReturn("qemu-guest-agent not responding"); + when(agentMgr.send(eq(HOST_ID), any(FreezeThawVMCommand.class))).thenReturn(freezeAnswer); + + // Cleanup mocks for finally block + when(vmSnapshotDetailsDao.listDetails(SNAPSHOT_ID)).thenReturn(Collections.emptyList()); + doReturn(true).when(vmSnapshotHelper).vmSnapshotStateTransitTo(any(), eq(VMSnapshot.Event.OperationFailed)); + + CloudRuntimeException ex = assertThrows(CloudRuntimeException.class, + () -> strategy.takeVMSnapshot(vmSnapshot)); + + assertEquals(true, ex.getMessage().contains("Could not freeze VM")); + assertEquals(true, ex.getMessage().contains("qemu-guest-agent")); + } + + @Test + void testTakeVMSnapshot_FreezeReturnsNull_ThrowsException() throws Exception { + VMSnapshotVO vmSnapshot = createTakeSnapshotVmSnapshot(); + setupTakeSnapshotCommon(vmSnapshot); + setupSingleVolumeForTakeSnapshot(); + + // Freeze returns null + 
when(agentMgr.send(eq(HOST_ID), any(FreezeThawVMCommand.class))).thenReturn(null); + + when(vmSnapshotDetailsDao.listDetails(SNAPSHOT_ID)).thenReturn(Collections.emptyList()); + doReturn(true).when(vmSnapshotHelper).vmSnapshotStateTransitTo(any(), eq(VMSnapshot.Event.OperationFailed)); + + assertThrows(CloudRuntimeException.class, () -> strategy.takeVMSnapshot(vmSnapshot)); + } + + @Test + void testTakeVMSnapshot_AgentUnavailable_ThrowsCloudRuntimeException() throws Exception { + VMSnapshotVO vmSnapshot = createTakeSnapshotVmSnapshot(); + setupTakeSnapshotCommon(vmSnapshot); + setupSingleVolumeForTakeSnapshot(); + + when(agentMgr.send(eq(HOST_ID), any(FreezeThawVMCommand.class))) + .thenThrow(new AgentUnavailableException(HOST_ID)); + + when(vmSnapshotDetailsDao.listDetails(SNAPSHOT_ID)).thenReturn(Collections.emptyList()); + doReturn(true).when(vmSnapshotHelper).vmSnapshotStateTransitTo(any(), eq(VMSnapshot.Event.OperationFailed)); + + CloudRuntimeException ex = assertThrows(CloudRuntimeException.class, + () -> strategy.takeVMSnapshot(vmSnapshot)); + assertEquals(true, ex.getMessage().contains("failed")); + } + + @Test + void testTakeVMSnapshot_OperationTimeout_ThrowsCloudRuntimeException() throws Exception { + VMSnapshotVO vmSnapshot = createTakeSnapshotVmSnapshot(); + setupTakeSnapshotCommon(vmSnapshot); + setupSingleVolumeForTakeSnapshot(); + + when(agentMgr.send(eq(HOST_ID), any(FreezeThawVMCommand.class))) + .thenThrow(new OperationTimedoutException(null, 0, 0, 0, false)); + + when(vmSnapshotDetailsDao.listDetails(SNAPSHOT_ID)).thenReturn(Collections.emptyList()); + doReturn(true).when(vmSnapshotHelper).vmSnapshotStateTransitTo(any(), eq(VMSnapshot.Event.OperationFailed)); + + CloudRuntimeException ex = assertThrows(CloudRuntimeException.class, + () -> strategy.takeVMSnapshot(vmSnapshot)); + assertEquals(true, ex.getMessage().contains("timed out")); + } + + // ══════════════════════════════════════════════════════════════════════════ + // Tests: Quiesce 
Behavior + // ══════════════════════════════════════════════════════════════════════════ + + @Test + void testTakeVMSnapshot_QuiesceFalse_SkipsFreezeThaw() throws Exception { + VMSnapshotVO vmSnapshot = createTakeSnapshotVmSnapshot(); + // Explicitly set quiesce to false + VMSnapshotOptions options = mock(VMSnapshotOptions.class); + when(options.needQuiesceVM()).thenReturn(false); + when(vmSnapshot.getOptions()).thenReturn(options); + + setupTakeSnapshotCommon(vmSnapshot); + setupSingleVolumeForTakeSnapshot(); + + // The FlexVolume snapshot flow will try to call Utility.getStrategyByStoragePoolDetails + // which is a static method that makes real connections. We expect this to fail in unit tests. + // The important thing is that freeze/thaw was NOT called before the failure. + when(vmSnapshotDetailsDao.listDetails(SNAPSHOT_ID)).thenReturn(Collections.emptyList()); + doReturn(true).when(vmSnapshotHelper).vmSnapshotStateTransitTo(any(), eq(VMSnapshot.Event.OperationFailed)); + + // Since Utility.getStrategyByStoragePoolDetails is static and creates real Feign clients, + // this will fail. We just verify that freeze was never called. 
+ try { + strategy.takeVMSnapshot(vmSnapshot); + } catch (Exception e) { + // Expected — static utility can't be mocked in unit test + } + + // No freeze/thaw commands should be sent when quiesce is false + verify(agentMgr, never()).send(eq(HOST_ID), any(FreezeThawVMCommand.class)); + } + + // ══════════════════════════════════════════════════════════════════════════ + // Tests: Parent snapshot chain + // ══════════════════════════════════════════════════════════════════════════ + + @Test + void testTakeVMSnapshot_WithParentSnapshot_SetsParentId() throws Exception { + VMSnapshotVO vmSnapshot = createTakeSnapshotVmSnapshot(); + setupTakeSnapshotCommon(vmSnapshot); + setupSingleVolumeForTakeSnapshot(); + + // Has a current (parent) snapshot + VMSnapshotVO currentSnapshot = mock(VMSnapshotVO.class); + when(vmSnapshotDao.findCurrentSnapshotByVmId(VM_ID)).thenReturn(currentSnapshot); + VMSnapshotTO parentTO = mock(VMSnapshotTO.class); + when(parentTO.getId()).thenReturn(199L); + when(vmSnapshotHelper.getSnapshotWithParents(currentSnapshot)).thenReturn(parentTO); + + // Freeze success (since quiesce=true by default) + FreezeThawVMAnswer freezeAnswer = mock(FreezeThawVMAnswer.class); + when(freezeAnswer.getResult()).thenReturn(true); + FreezeThawVMAnswer thawAnswer = mock(FreezeThawVMAnswer.class); + when(thawAnswer.getResult()).thenReturn(true); + when(agentMgr.send(eq(HOST_ID), any(FreezeThawVMCommand.class))) + .thenReturn(freezeAnswer) + .thenReturn(thawAnswer); + + when(vmSnapshotDetailsDao.listDetails(SNAPSHOT_ID)).thenReturn(Collections.emptyList()); + doReturn(true).when(vmSnapshotHelper).vmSnapshotStateTransitTo(any(), eq(VMSnapshot.Event.OperationFailed)); + + // FlexVol snapshot flow will fail on static method, but parent should already be set + try { + strategy.takeVMSnapshot(vmSnapshot); + } catch (Exception e) { + // Expected + } + + // Verify parent was set on the VM snapshot before the FlexVol snapshot attempt + verify(vmSnapshot).setParent(199L); + } + + 
@Test + void testTakeVMSnapshot_WithNoParentSnapshot_SetsParentNull() throws Exception { + VMSnapshotVO vmSnapshot = createTakeSnapshotVmSnapshot(); + setupTakeSnapshotCommon(vmSnapshot); + setupSingleVolumeForTakeSnapshot(); + + when(vmSnapshotDao.findCurrentSnapshotByVmId(VM_ID)).thenReturn(null); + + FreezeThawVMAnswer freezeAnswer = mock(FreezeThawVMAnswer.class); + when(freezeAnswer.getResult()).thenReturn(true); + FreezeThawVMAnswer thawAnswer = mock(FreezeThawVMAnswer.class); + when(thawAnswer.getResult()).thenReturn(true); + when(agentMgr.send(eq(HOST_ID), any(FreezeThawVMCommand.class))) + .thenReturn(freezeAnswer) + .thenReturn(thawAnswer); + + when(vmSnapshotDetailsDao.listDetails(SNAPSHOT_ID)).thenReturn(Collections.emptyList()); + doReturn(true).when(vmSnapshotHelper).vmSnapshotStateTransitTo(any(), eq(VMSnapshot.Event.OperationFailed)); + + try { + strategy.takeVMSnapshot(vmSnapshot); + } catch (Exception e) { + // Expected + } + + verify(vmSnapshot).setParent(null); + } + + // ────────────────────────────────────────────────────────────────────────── + // Helper: Set up common mocks for takeVMSnapshot tests + // ────────────────────────────────────────────────────────────────────────── + + private VMSnapshotVO createTakeSnapshotVmSnapshot() { + VMSnapshotVO vmSnapshot = mock(VMSnapshotVO.class); + when(vmSnapshot.getId()).thenReturn(SNAPSHOT_ID); + when(vmSnapshot.getVmId()).thenReturn(VM_ID); + lenient().when(vmSnapshot.getName()).thenReturn("vm-snap-1"); + lenient().when(vmSnapshot.getType()).thenReturn(VMSnapshot.Type.Disk); + lenient().when(vmSnapshot.getDescription()).thenReturn("Test ONTAP VM Snapshot"); + lenient().when(vmSnapshot.getOptions()).thenReturn(new VMSnapshotOptions(true)); + return vmSnapshot; + } + + private UserVmVO setupTakeSnapshotCommon(VMSnapshotVO vmSnapshot) throws Exception { + when(vmSnapshotHelper.pickRunningHost(VM_ID)).thenReturn(HOST_ID); + + UserVmVO userVm = mock(UserVmVO.class); + 
when(userVm.getId()).thenReturn(VM_ID); + when(userVm.getGuestOSId()).thenReturn(GUEST_OS_ID); + when(userVm.getInstanceName()).thenReturn(VM_INSTANCE_NAME); + when(userVm.getUuid()).thenReturn(VM_UUID); + when(userVm.getState()).thenReturn(VirtualMachine.State.Running); + when(userVmDao.findById(VM_ID)).thenReturn(userVm); + + GuestOSVO guestOS = mock(GuestOSVO.class); + when(guestOS.getDisplayName()).thenReturn("CentOS 8"); + when(guestOSDao.findById(GUEST_OS_ID)).thenReturn(guestOS); + + when(vmSnapshotDao.findCurrentSnapshotByVmId(VM_ID)).thenReturn(null); + + doReturn(true).when(vmSnapshotHelper).vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.CreateRequested); + + return userVm; + } + + private void setupSingleVolumeForTakeSnapshot() { + VolumeObjectTO volumeTO = mock(VolumeObjectTO.class); + when(volumeTO.getId()).thenReturn(VOLUME_ID_1); + when(volumeTO.getSize()).thenReturn(10737418240L); + List volumeTOs = Collections.singletonList(volumeTO); + when(vmSnapshotHelper.getVolumeTOList(VM_ID)).thenReturn(volumeTOs); + + VolumeVO volumeVO = mock(VolumeVO.class); + when(volumeVO.getId()).thenReturn(VOLUME_ID_1); + when(volumeVO.getPoolId()).thenReturn(POOL_ID_1); + when(volumeVO.getVmSnapshotChainSize()).thenReturn(null); + when(volumeDao.findById(VOLUME_ID_1)).thenReturn(volumeVO); + + // Pool details for FlexVol grouping + Map poolDetails = new HashMap<>(); + poolDetails.put(OntapStorageConstants.VOLUME_UUID, "flexvol-uuid-1"); + poolDetails.put(OntapStorageConstants.USERNAME, "admin"); + poolDetails.put(OntapStorageConstants.PASSWORD, "pass"); + poolDetails.put(OntapStorageConstants.STORAGE_IP, "10.0.0.1"); + poolDetails.put(OntapStorageConstants.SVM_NAME, "svm1"); + poolDetails.put(OntapStorageConstants.SIZE, "107374182400"); + poolDetails.put(OntapStorageConstants.PROTOCOL, "NFS3"); + when(storagePoolDetailsDao.listDetailsKeyPairs(POOL_ID_1)).thenReturn(poolDetails); + + VolumeInfo volumeInfo = mock(VolumeInfo.class); + 
when(volumeInfo.getId()).thenReturn(VOLUME_ID_1); + when(volumeInfo.getName()).thenReturn("vol-1"); + when(volumeDataFactory.getVolume(VOLUME_ID_1)).thenReturn(volumeInfo); + } +} diff --git a/server/src/main/java/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java b/server/src/main/java/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java index 5c90b5cbee6b..029c36a98fc0 100644 --- a/server/src/main/java/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java @@ -135,6 +135,7 @@ public class VMSnapshotManagerImpl extends MutualExclusiveIdsManagerBase implements VMSnapshotManager, VMSnapshotService, VmWorkJobHandler, Configurable { public static final String VM_WORK_JOB_HANDLER = VMSnapshotManagerImpl.class.getSimpleName(); + public static final String ONTAP_PLUGIN_NAME = "NetApp ONTAP"; @Inject VMInstanceDao _vmInstanceDao; @@ -390,6 +391,15 @@ public VMSnapshot allocVMSnapshot(Long vmId, String vsDisplayName, String vsDesc //Other Storage volume plugins could integrate this with their own functionality for group snapshots VMSnapshotStrategy snapshotStrategy = storageStrategyFactory.getVmSnapshotStrategy(userVmVo.getId(), rootVolumePool.getId(), snapshotMemory); if (snapshotStrategy == null) { + // Check if this is ONTAP managed storage with memory snapshot request - provide specific error message + if (snapshotMemory && rootVolumePool.isManaged() && + ONTAP_PLUGIN_NAME.equals(rootVolumePool.getStorageProviderName())) { + String message = String.format("Memory snapshots (snapshotmemory=true) are not supported for VMs on ONTAP managed storage. " + + "Instance [%s] uses ONTAP storage which only supports disk-only (crash-consistent) snapshots. 
" + + "Please use snapshotmemory=false for disk-only snapshots.", userVmVo.getUuid()); + logger.error(message); + throw new CloudRuntimeException(message); + } String message = String.format("No strategy was able to handle requested snapshot for Instance [%s].", userVmVo.getUuid()); logger.error(message); throw new CloudRuntimeException(message); diff --git a/ui/public/locales/en.json b/ui/public/locales/en.json index 6f3a623b8092..5f1747e66629 100644 --- a/ui/public/locales/en.json +++ b/ui/public/locales/en.json @@ -2941,6 +2941,12 @@ "label.leased": "Leased", "label.totalduration": "Total duration", "label.usestoragereplication": "Use primary storage replication", +"label.ontap.username.tooltip": "The Username for the NetApp ONTAP storage array", +"label.ontap.password.tooltip": "The Password for the NetApp ONTAP storage array", +"label.ontap.ip.tooltip": "The IP for the NetApp ONTAP storage array", +"label.ontap.svm.name.tooltip": "The SVM Name for the NetApp ONTAP storage array", +"label.ontap.ip": "Storage Array IP", +"label.ontap.svm.name": "SVM Name", "message.acquire.ip.failed": "Failed to acquire IP.", "message.action.acquire.ip": "Please confirm that you want to acquire new IP.", "message.action.cancel.maintenance": "Your host has been successfully canceled for maintenance. This process can take up to several minutes.", diff --git a/ui/src/views/infra/AddPrimaryStorage.vue b/ui/src/views/infra/AddPrimaryStorage.vue index d46396bbb3a5..a869ff0e6a1c 100644 --- a/ui/src/views/infra/AddPrimaryStorage.vue +++ b/ui/src/views/infra/AddPrimaryStorage.vue @@ -242,7 +242,7 @@ -
+