diff --git a/.github/workflows/nebula-ci.yml b/.github/workflows/nebula-ci.yml new file mode 100644 index 000000000..314ca6f5b --- /dev/null +++ b/.github/workflows/nebula-ci.yml @@ -0,0 +1,45 @@ +name: "CI" +on: + push: + branches: + - '*' + tags-ignore: + - '*' + pull_request: + +jobs: + build: + runs-on: ubuntu-latest + strategy: + matrix: + # test against JDK 8 + java: [ 8 ] + name: CI with Java ${{ matrix.java }} + steps: + - uses: actions/checkout@v1 + - name: Setup jdk + uses: actions/setup-java@v1 + with: + java-version: ${{ matrix.java }} + - uses: actions/cache@v1 + id: gradle-cache + with: + path: ~/.gradle/caches + key: ${{ runner.os }}-gradle-${{ hashFiles('**/gradle/dependency-locks/*.lockfile') }} + restore-keys: | + ${{ runner.os }}-gradle- + - uses: actions/cache@v1 + id: gradle-wrapper-cache + with: + path: ~/.gradle/wrapper + key: ${{ runner.os }}-gradlewrapper-${{ hashFiles('gradle/wrapper/*') }} + restore-keys: | + ${{ runner.os }}-gradlewrapper- + - name: Build with Gradle + run: ./gradlew --info --stacktrace build + env: + CI_NAME: github_actions + CI_BUILD_NUMBER: ${{ github.sha }} + CI_BUILD_URL: 'https://github.com/${{ github.repository }}' + CI_BRANCH: ${{ github.ref }} + COVERALLS_REPO_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/nebula-publish.yml b/.github/workflows/nebula-publish.yml new file mode 100644 index 000000000..5e20218a1 --- /dev/null +++ b/.github/workflows/nebula-publish.yml @@ -0,0 +1,51 @@ +name: "Publish candidate/release to NetflixOSS and Maven Central" +on: + push: + tags: + - v*.*.* + - v*.*.*-rc.* + release: + types: + - published + +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v1 + - name: Setup jdk 8 + uses: actions/setup-java@v1 + with: + java-version: 1.8 + - uses: actions/cache@v1 + id: gradle-cache + with: + path: ~/.gradle/caches + key: ${{ runner.os }}-gradle-${{ hashFiles('**/gradle/dependency-locks/*.lockfile') }} + restore-keys: | + ${{ runner.os }}-gradle- + - uses: actions/cache@v1 + id: gradle-wrapper-cache + with: + path: ~/.gradle/wrapper + key: ${{ runner.os }}-gradlewrapper-${{ hashFiles('gradle/wrapper/*') }} + restore-keys: | + ${{ runner.os }}-gradlewrapper- + - name: Publish candidate + if: contains(github.ref, '-rc.') + run: ./gradlew --info --stacktrace -Prelease.useLastTag=true candidate + env: + NETFLIX_OSS_SIGNING_KEY: ${{ secrets.ORG_SIGNING_KEY }} + NETFLIX_OSS_SIGNING_PASSWORD: ${{ secrets.ORG_SIGNING_PASSWORD }} + NETFLIX_OSS_REPO_USERNAME: ${{ secrets.ORG_NETFLIXOSS_USERNAME }} + NETFLIX_OSS_REPO_PASSWORD: ${{ secrets.ORG_NETFLIXOSS_PASSWORD }} + - name: Publish release + if: (!contains(github.ref, '-rc.')) + run: ./gradlew --info -Prelease.useLastTag=true final + env: + NETFLIX_OSS_SONATYPE_USERNAME: ${{ secrets.ORG_SONATYPE_USERNAME }} + NETFLIX_OSS_SONATYPE_PASSWORD: ${{ secrets.ORG_SONATYPE_PASSWORD }} + NETFLIX_OSS_SIGNING_KEY: ${{ secrets.ORG_SIGNING_KEY }} + NETFLIX_OSS_SIGNING_PASSWORD: ${{ secrets.ORG_SIGNING_PASSWORD }} + NETFLIX_OSS_REPO_USERNAME: ${{ secrets.ORG_NETFLIXOSS_USERNAME }} + NETFLIX_OSS_REPO_PASSWORD: ${{ secrets.ORG_NETFLIXOSS_PASSWORD }} diff --git a/.github/workflows/nebula-snapshot.yml b/.github/workflows/nebula-snapshot.yml new file mode 100644 index 000000000..b4ee74093 --- /dev/null +++ b/.github/workflows/nebula-snapshot.yml @@ -0,0 +1,37 @@ +name: "Publish snapshot to NetflixOSS and Maven Central" + +on: + push: + branches: + - master + +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 +
with: + fetch-depth: 0 + - name: Set up JDK + uses: actions/setup-java@v1 + with: + java-version: 8 + - uses: actions/cache@v2 + id: gradle-cache + with: + path: | + ~/.gradle/caches + key: ${{ runner.os }}-gradle-${{ hashFiles('**/*.gradle') }} + - uses: actions/cache@v2 + id: gradle-wrapper-cache + with: + path: | + ~/.gradle/wrapper + key: ${{ runner.os }}-gradlewrapper-${{ hashFiles('gradle/wrapper/*') }} + - name: Build + run: ./gradlew build snapshot + env: + NETFLIX_OSS_SIGNING_KEY: ${{ secrets.ORG_SIGNING_KEY }} + NETFLIX_OSS_SIGNING_PASSWORD: ${{ secrets.ORG_SIGNING_PASSWORD }} + NETFLIX_OSS_REPO_USERNAME: ${{ secrets.ORG_NETFLIXOSS_USERNAME }} + NETFLIX_OSS_REPO_PASSWORD: ${{ secrets.ORG_NETFLIXOSS_PASSWORD }} diff --git a/.gitignore b/.gitignore index 8163af2a8..c1862c52c 100644 --- a/.gitignore +++ b/.gitignore @@ -41,6 +41,7 @@ Thumbs.db .gradle .m2 + # Build output directories /target */target @@ -67,3 +68,6 @@ atlassian-ide-plugin.xml # NetBeans specific files/directories .nbattrs >>>>>>> build/multi-project + +# publishing secrets +secrets/signing-key diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 60fe9e378..000000000 --- a/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -language: java -jdk: -- oraclejdk8 -sudo: false -install: ./installViaTravis.sh -script: ./buildViaTravis.sh -cache: - directories: - - $HOME/.gradle -env: - global: - - secure: p81PYYYYhjhp1uJHOoT9EfUia8qPMphdykCxQannWMVvIFKXF5ibi/Qd52OUIYuJT9Q5MN9GhlwCtolxmCZu44YMLi99LsoITAv6n08LAFEux1oAOXmGt3ZBllvBFtlW+jlQ5lKmbuuPue3RVbTaWzL3KCCaYQ4uKW/sP31bXic= - secure: AYWrIC/1JqAVyLhzlDhL163LypPpq7AliDltfey9emxzS73vB5VrGTHG45Em7xdkHN0kc1vED+UFsYks7ym1olXZPsbW29R0Gfd+p1VxCE3Cbhj7x690xNC3wIlSgWGpbgiPs7vhVN80OuNgnpz9/+WRFpKAVrYrcisbvugsDqk= - secure: Ai75crFDh3imkmxzNQDXIwK49appZuZTrsmdZzgH1Zl/nD33RoZPWUWeeHP0yJoicNqZlU6aEXJnk8JGDWtPIT3VNO7Rsey+yrw041rHNV6bFGpwGdQgKv5CUhRHK+gShgFgnME5THCPKvK51+li1feIoByoYVmEn0gmvcxL0yM= - secure: ZqBsyaFMBz3cqM7UHq0IRotTPaoBNUHrC7buiAqqhZ5JNM48a2m7ORuu/SENPFfr6xOT5KylCrvT6eD1bkQi5S2jT9Qyoju/9H/hXaAUG89dhwKOxqEE11ZGKF8DlNO/jcmchVnIkgWfEjVKc7vCAww1Y8lgxhyj4gRtnln+F+o= diff --git a/CHANGELOG.md b/CHANGELOG.md index b6576c162..7be01db77 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,242 @@ # Changelog +## 2022/11/15 3.11.94 +(#1013) Create an operator-specifiable time such that backup files written before it are automatically compressed using SNAPPY before upload. +(#1012) Ensure SI files get into the meta file properly. + +## 2022/10/10 3.11.93 +(#1006) Add the ability to include more keys in the message attributes for backup notification messages. +(#999) Identify incrementals, optionally skip metafile compression, and upload data files last. +(#1009) Pass millis to sleep instead of seconds. + +## 2022/09/15 3.11.92 +(#1005) RandomPartitioner creates tokens that don't always fit into a long, so we use BigInteger to store and compare them. + +## 2022/09/14 3.11.91 +(#997) Revert "CASS-2805 Add hook to optionally skip deletion for files added within a certain window. (#992)" +(#1001) Use IP to get gossip info rather than hostname. +(#1002) Spread deletes over the interval depending on where the node is in the ring. +(#966) Remove UpdateSecuritySettings. + +## 2022/06/26 3.11.90 +*UpdateSecuritySettings will be removed in the next release* +(#992) Add hook to optionally skip deletion for files added within a certain window. + +## 2022/06/02 3.11.89 +*UpdateSecuritySettings will be removed in the next release* +(#975) Dynamic Rate Limiting of Snapshots.
+(#985) Optionally add a Content-MD5 header to uploads of files smaller than the backupChunkSize. + +## 2022/01/25 3.11.88 +(#979) Cache region in AWSInstanceInfo to avoid throttling. + +## 2022/01/25 3.11.87 +Botched release. Disregard. + +## 2022/01/04 3.11.86 +(#977) Use IMDSV2 to get instance metadata. + +## 2021/08/31 3.11.85 +(#973) Throw in case of gossip mismatch when determining replace_ip in the assigned token case. +(#970) Make getPublicIP and getPublicHostname fail elegantly when those attributes are absent. + +## 2021/08/18 3.11.84 +(#961) Ensure SI backup directories are deleted when empty. + +## 2021/07/28 3.11.83 +(#960) Fix bug in metafile generation where column family names are printed in place of keyspace names. + +## 2021/07/11 3.11.82 +(#955) Upload and delete any backup directory starting with a '.' + +## 2021/06/11 3.11.81 +(#952) Fixing integer overflow problem in restore. + +## 2021/06/07 3.11.80 +(#948) Reverting to the previous behavior of omitting milliseconds in backup last modified times. + +## 2021/05/25 3.11.79 +(#922) Optionally skip compression of backups. +(#943) Optionally make an extra check to ensure the Thrift server is actually listening on the rpc_port. +(#944) Make disk_access_mode a first-class configuration option. +(#941) Delete secondary index backup directories after uploading. +(#940) Improve operator control over creation of ingress rules. + +## 2021/05/12 3.11.78 +Improve operator control over creation of ingress rules. + +## 2021/03/17 3.11.77 +(#913) Store Private IPs in the token database when the snitch is GPFS. + +## 2021/03/09 3.11.76 +(#918) Adding support for a custom override for role_manager. + +## 2020/11/09 3.11.73 +(#911) Backup secondary index files. + +## 2020/09/30 3.11.72 +(#908, #910) Stop explicitly filtering the OpsCenter keyspace when backing up. Remove more noisy log statements. + +## 2020/09/08 3.11.71 +(#902) Remove noisy log statements from CassandraAdmin. + +## 2020/08/11 3.11.70 +(#901) Throw when gossip unanimously says a token is already owned by a live node. + +## 2020/07/15 3.11.69 +(#894) Fix the inferTokenOwnership information. This will provide all the details to the caller method so it can make a decision rather than throwing an exception. +(#897) Make BackupVerificationTask log and emit when there is no verified backup within SLO. Cease requiring the backup to be fully in S3. + +## 2020/07/02 3.11.68 +(#891) Adding an exception in the replace-ip path when a node attempts to bootstrap to an existing token because of a stale state. + +## 2020/06/04 3.11.67 +Re-releasing 3.11.66 + +## 2020/06/03 3.11.66 +(#881) Porting PropertiesFileTuner to the 3.11 branch. + +## 2020/05/26 3.11.65 +(#884) Adding support for upstream C* log directory env variable. + +## 2020/05/19 3.11.64 +Re-re-releasing 3.11.62 + +## 2020/05/19 3.11.63 +Re-releasing 3.11.62 + +## 2020/05/19 3.11.62 +(#878) Fixing BackupServletV2 endpoints that were broken because of an underlying dependency change from the release 3.11.59. + +## 2020/05/18 3.11.61 +This is a re-release of v3.11.60 that failed to be uploaded. + +## 2020/05/18 3.11.60 +(#870) Fixing PriamConfig endpoints that were broken because of an underlying dependency change from the last release. + +## 2020/05/05 3.11.59 +(#860, #864) Fixing the bug in the backup verification strategy to only page when there is no valid backup in the specified date range (SLO window), and also disable the lifecycle rule for backup if backup v1 is disabled.
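
A concrete illustration of the 3.11.92 entry above (#1005): RandomPartitioner tokens range over [0, 2^127], which overflows a signed 64-bit long, so tokens have to be parsed and ordered as BigInteger. A minimal, self-contained sketch of that comparison (class and method names here are illustrative only, not Priam's actual code):

```java
import java.math.BigInteger;

public class TokenCompare {
    // RandomPartitioner tokens lie in [0, 2^127]; Long.parseLong overflows at the top of that range.
    private static final BigInteger MAX_TOKEN = BigInteger.valueOf(2).pow(127);

    /** Parses two RandomPartitioner tokens and compares them without overflow. */
    static int compareTokens(String a, String b) {
        return new BigInteger(a).compareTo(new BigInteger(b));
    }

    public static void main(String[] args) {
        String max = MAX_TOKEN.toString();                              // 2^127: a 39-digit number
        String nearMax = MAX_TOKEN.subtract(BigInteger.ONE).toString(); // 2^127 - 1
        System.out.println(compareTokens(max, nearMax));                // prints 1
    }
}
```

Plain string comparison would not work either ("9" sorts after "10" lexicographically); numeric comparison is the point of the change.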
+ +## 2020/04/22 3.11.58 +(#850) Modifying the backup verification strategy to verify all unverified backups in the specified date range, vs. the old implementation that verified only the latest backup in the specified date range. Also adding a hook in StandardTuner to allow subclasses to add custom Cassandra parameters. + +## 2020/02/21 3.11.57 +(#844, #839) Implementation of a filter for Backup Notification. The filter can be controlled using the configuration "priam.backupNotifyComponentIncludeList". + +## 2019/10/24 3.11.56 +(#836) Move flush and compactions to the Service Layer. This allows us to "hot" reload the jobs when configurations change. +(#836) Send a SNAPSHOT_VERIFIED message when a snapshot is verified and ready to be consumed by downstream dependencies. + +## 2019/08/23 3.11.55 +(#832) Travis build fails for oraclejdk8. Migrate to openjdk8. + +## 2019/10/16 3.11.54 +(#834) Removing the functionality of creating an incremental manifest file in backup V1, as it is not used. +(#834) Bug fix: When the meta file does not exist for TTL in backup v2, we should not throw an NPE. +(#834) Bug fix: Fix the X-Y-Z issue using gossip status information instead of gossip state. Note that gossip status is (JOINING/LEAVING/NORMAL) while gossip state is (UP/DOWN). Gossip state is calculated individually by each Cassandra instance using gossip status. + +## 2019/06/07 3.11.53 +(#826): Roll back the fixes to use Gossip info while grabbing dead and pre-assigned tokens. Gossip info does not always reflect the correct cluster state. A node marked with status NORMAL in the Gossip info could actually be down; this can be checked using nodetool ring. This change will unblock the nodes from joining the ring. + +## 2019/05/28 3.11.52 +(#824): Use replace_address instead of replace_address_first_boot. replace_address always tries to bootstrap Cassandra in replace mode even when the previous bootstrap was successful. replace_address_first_boot bootstraps normally if the node already bootstrapped successfully. + +## 2019/05/14 3.11.51 +(#818) Changing the list in TokenRetrievalUtils to use wildcards. + +## 2019/05/13 3.11.50 +(#816) Priam will check Cassandra gossip information while grabbing a pre-assigned token to decide whether it should start Cassandra in bootstrap mode or in replace mode. +(#816) At most 3 random nodes are used to get the gossip information. +(#816) Moved the token-owner inference logic based on Cassandra gossip into a util class. +(#816) Refactored the InstanceIdentity.init() method. + +## 2019/04/29 3.11.49 +(#815) Update the backup service based on configuration changes. +(#812) Expose the list of files from backups as an API call. +(#809) Run TTL for backups on a simple timer to avoid S3 delete API call throttling. +(#815) API to clear the local filesystem cache. +(#815) Bug fix: Increment the backup failure metric when no backup is found. +(#809) Bug fix: No backup verification job during restore. + +## 2019/03/19 3.11.48 +(#807) Fix the X->Y->Z issue. Replace nodes when gossip actually converges. + +## 2019/03/13 3.11.47 +(#804) Write-thru cache in AbstractFileSystem. +(#803) Address the issue that a C* snapshot is not "sync" in nature w.r.t. the filesystem. + +## 2019/03/05 3.11.46 +(#794) Fix for a forgotten file. +(#798) Use the older API for prefix filtering (backup TTL), if a prefix is available. +(#800) Send notifications only when we upload a file. + +## 2019/02/27 3.11.45 +(#793) S3 - BucketLifecycleConfiguration has the `prefix` method removed from the latest library.
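
The 3.11.45 entry directly above (#793) is easiest to see in code: newer AWS SDK 1.11.x releases removed `Rule.setPrefix(String)`, so a prefix-scoped lifecycle (TTL) rule has to carry its prefix inside a LifecycleFilter instead. A minimal sketch of that shape (the rule id and method name are illustrative, not Priam's actual code):

```java
import com.amazonaws.services.s3.model.BucketLifecycleConfiguration;
import com.amazonaws.services.s3.model.lifecycle.LifecycleFilter;
import com.amazonaws.services.s3.model.lifecycle.LifecyclePrefixPredicate;

public class LifecycleRuleSketch {
    // Expires objects under the given key prefix after the given number of days.
    // Older SDKs expressed this as Rule.withPrefix(prefix); the filter predicate replaces it.
    static BucketLifecycleConfiguration.Rule ttlRule(String prefix, int days) {
        return new BucketLifecycleConfiguration.Rule()
                .withId("ttl-" + prefix) // illustrative rule id
                .withFilter(new LifecycleFilter(new LifecyclePrefixPredicate(prefix)))
                .withExpirationInDays(days)
                .withStatus(BucketLifecycleConfiguration.ENABLED);
    }
}
```

A caller would then attach such rules through BucketLifecycleConfiguration.withRules(...) and apply them with the S3 client's setBucketLifecycleConfiguration.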
+ +## 2019/02/27 3.11.44 +(#791) BackupServlet had a backup status API call that was producing output which was not JSON. + +## 2019/02/15 3.11.43 +(#784): BackupVerificationService. +(#781): Add a cache for the getObjectExist API call to S3. This will help keep the cost of this call at bay. +(#781): Add a rate limiter for the getObjectExist API call to S3 so we can limit the number of calls. +(#784): Provide an override method to force Priam to replace a particular IP. + +## 2019/02/08 3.11.42 +(#777) Do not check the existence of a file if it is not SST_V2. S3 may decide to slow down and throw an error. Best not to make the S3 object-existence check (API call) if it is not required. + +## 2019/02/07 3.11.41 +(#775) Do not throw an NPE when no backup is found for the requested date. + +## 2019/01/30 3.11.39 +(#765) Add metrics on CassandraConfig resource calls. +(#768) Support configuring/tuning complex parameters in cassandra.yaml. +(#770) Add Cass SNAPSHOT JMX status, snapshot version, and last validated timestamp. Changes to the Servlet API and new APIs. + +## 2019/01/11 3.11.38 +(#761) Add a new file format (SST_V2) and methods to get/parse remote locations. +(#761) Upload files from SnapshotMetaService in backup version 2.0, if enabled. +(#761) Process older SNAPSHOT_V2 at the restart of Priam. +(#767) Backup Verification for Backup 2.0. +(#767) Restore for Backup 2.0. +(#767) Some API changes for Snapshot Verification. +(#767) Remove deprecated code like flush hour or snapshot hour. + +## 2018/10/29 3.11.37 +* Bug Fix: SnapshotMetaService can leave snapshots if there is any error. +* Bug Fix: SnapshotMetaService should continue building the snapshot even if an unexpected file is found in the snapshot. +* More cleanup of IConfiguration and moving code to appropriate places. + +## 2018/10/26 3.11.36 +* (#747) Aggregate InstanceData in InstanceInfo and pull information about the running instance. + +## 2018/10/17 3.11.35 +* (#739) BugFix: Null pointer exception while traversing the filesystem. +* (#737) Google java format validator addition. Use ./gradlew goJF to fix the formatting before sending a PR. +* (#740) Last but not least, a new logo for Priam. + +## 2018/10/08 3.11.33 +***WARNING*** THIS IS A BREAKING RELEASE +### New Feature +* (#731) Restores will be async in nature by default. +* (#731) Support for async snapshots via the configuration `priam.async.snapshot`. Similar support for async incrementals via the configuration `priam.async.incremental`. +* (#731) Better metrics for upload and download to/from the remote file system. +* (#731) Better support for including/excluding keyspaces/columnfamilies from backup, incremental backup, and restores. +* (#731) Expose priam configuration over HTTP and persist it at a regular interval (CRON) to the local file system for automation/tooling. +### Bug fix +* (#731) Metrics are incremented only once and in a central location at AbstractFileSystem. +* (#731) Remove deprecated AWS API calls. +### Breaking changes +* (#731) Removal of MBeans to collect metrics from S3FileSystem. They were unreliable and incorrect. +* (#731) Update to backup configurations: isIncrBackupParallelEnabled, getIncrementalBkupMaxConsumers, getIncrementalBkupQueueSize. They are renamed to ensure naming consistency. Refer to wiki for more details. +* (#731) Changes to backup/restore configuration: getSnapshotKeyspaceFilters, getSnapshotCFFilter, getIncrementalKeyspaceFilters, getIncrementalCFFilter, getRestoreKeyspaceFilter, getRestoreCFFilter. They are now centralized to ensure that we can support both include and exclude keyspaces/CF.
Refer to wiki for more details. + +## 2018/10/02 3.11.32 +* (#727) Bug Fix: Continue uploading incrementals when parallel incrementals are enabled and a file fails to upload. +* (#718) Add last modified time to S3 Object Metadata. + +## 2018/09/10 3.11.31 +* (#715) Bug Fix: Fix the bootstrap issue. Do not provide yourself as a seed node if the cluster is already up and running, as doing so will lead to data loss. + ## 2018/08/20 3.11.30 ***WARNING*** THIS IS A BREAKING RELEASE ### New Feature diff --git a/README.md b/README.md index 598f6d600..6e6071db0 100644 --- a/README.md +++ b/README.md @@ -1,9 +1,33 @@ -# Priam
+<div align="center">
+
+  <img src="./images/priam.png" alt="Priam Logo">
+
+</div>
+
-[![Build Status](https://travis-ci.org/Netflix/Priam.svg?branch=3.x)](https://travis-ci.org/Netflix/Priam)
-### Priam 3.11 branch supports Cassandra 3.11.x and 3.11+
+<div align="center">
+
+[Releases][release] &nbsp; | &nbsp; [Documentation][wiki] &nbsp; | &nbsp;
+[![Build Status][img-travis-ci]][travis-ci]
+
+</div>
+ +## Important Notice +* Priam 3.11 branch supports Cassandra 3.x. Netflix internally uses Apache Cassandra 3.0.19. + +## Table of Contents +[**TL;DR**](#tldr) + +[**Features**](#features) + +[**Compatibility**](#compatibility) + +[**Installation**](#installation) + +**Additional Info** + * [**Cluster Management**](#clustermanagement) + * [**Changelog**](#changelog) + + +## TL;DR Priam is a process/tool that runs alongside Apache Cassandra to automate the following: - Backup and recovery (Complete and incremental) - Token management @@ -17,7 +41,7 @@ The name 'Priam' refers to the King of Troy in Greek mythology, who was the fath Priam is actively developed and used at Netflix. -Features: +## Features - Token management using SimpleDB - Support multi-region Cassandra deployment in AWS via public IP. - Automated security group update in multi-region environment. @@ -28,5 +52,24 @@ Features: - APIs to list and restore backup data. - REST APIs for backup/restore and other operations -Compatibility: -Please see https://github.com/Netflix/Priam/wiki/Compatibility for details. +## Compatibility +See [Compatibility](http://netflix.github.io/Priam/#compatibility) for details. + + +## Installation +See [Setup](http://netflix.github.io/Priam/latest/mgmt/installation/) for details. + + +## Cluster Management +Basic configuration/REST API's to manage cassandra cluster. See [Cluster Management](http://netflix.github.io/Priam/latest/management/) for details. +## Changelog +See [CHANGELOG.md](CHANGELOG.md) + + +[release]:https://github.com/Netflix/Priam/releases/latest "Latest Release (external link) ➶" +[wiki]:http://netflix.github.io/Priam/ +[repo]:https://github.com/Netflix/Priam +[img-travis-ci]:https://travis-ci.com/Netflix/Priam.svg?branch=3.11 +[travis-ci]:https://travis-ci.com/Netflix/Priam diff --git a/build.gradle b/build.gradle index 5e0628a90..bbaa3da3f 100644 --- a/build.gradle +++ b/build.gradle @@ -1,5 +1,10 @@ plugins { - id 'nebula.netflixoss' version '5.1.1' + id 'nebula.netflixoss' version '9.1.0' + id 'com.github.sherter.google-java-format' version '0.8' +} + +googleJavaFormat { + options style: 'AOSP' } ext.githubProjectName = 'Priam' @@ -11,7 +16,8 @@ allprojects { group = 'com.netflix.priam' repositories { - jcenter() + mavenCentral() + google() } configurations { @@ -21,49 +27,60 @@ allprojects { } dependencies { - compile 'org.apache.commons:commons-lang3:3.5' + compile 'org.apache.commons:commons-lang3:3.8.1' + compile 'org.apache.commons:commons-text:1.8' compile 'commons-logging:commons-logging:1.2' - compile 'org.apache.commons:commons-collections4:4.1' + compile 'org.apache.commons:commons-collections4:4.2' compile 'commons-io:commons-io:2.6' compile 'commons-cli:commons-cli:1.4' - compile 'commons-httpclient:commons-httpclient:3.1' compile 'com.sun.jersey.contribs:jersey-multipart:1.19.4' compile 'com.sun.jersey:jersey-json:1.19.4' compile 'com.sun.jersey:jersey-bundle:1.19.4' compile 'com.sun.jersey.contribs:jersey-guice:1.19.4' compile 'com.google.guava:guava:21.0' compile 'com.google.code.findbugs:jsr305:3.0.2' - compile 'com.amazonaws:aws-java-sdk:1.11.386' - compile 'com.google.inject:guice:4.1.0' - compile 'com.google.inject.extensions:guice-servlet:4.1.0' + + // AWS Services + compile 'com.amazonaws:aws-java-sdk-core:latest.release' + compile 'com.amazonaws:aws-java-sdk-s3:latest.release' + compile 'com.amazonaws:aws-java-sdk-sns:latest.release' + compile 'com.amazonaws:aws-java-sdk-ec2:latest.release' + compile 
'com.amazonaws:aws-java-sdk-autoscaling:latest.release' + compile 'com.amazonaws:aws-java-sdk-sts:latest.release' + compile 'com.amazonaws:aws-java-sdk-simpledb:latest.release' + + compile 'com.google.inject:guice:4.2.2' + compile 'com.google.inject.extensions:guice-servlet:4.2.2' compile 'org.quartz-scheduler:quartz:2.3.0' compile 'com.googlecode.json-simple:json-simple:1.1.1' - compile 'org.xerial.snappy:snappy-java:1.1.2.6' - compile 'org.yaml:snakeyaml:1.19' - compile 'org.apache.cassandra:cassandra-all:3.0.17' + compile 'org.xerial.snappy:snappy-java:1.1.7.3' + compile 'org.yaml:snakeyaml:1.25' + compile 'org.apache.cassandra:cassandra-all:4.1.0' compile 'javax.ws.rs:jsr311-api:1.1.1' - compile 'joda-time:joda-time:2.9.9' - compile 'org.apache.commons:commons-configuration2:2.1.1' - compile 'xerces:xercesImpl:2.11.0' - compile 'net.java.dev.jna:jna:4.4.0' - compile 'org.apache.httpcomponents:httpclient:4.5.3' - compile 'org.apache.httpcomponents:httpcore:4.4.6' + compile 'joda-time:joda-time:2.10.1' + compile 'org.apache.commons:commons-configuration2:2.4' + compile 'xerces:xercesImpl:2.12.0' + compile 'net.java.dev.jna:jna:5.2.0' + compile 'org.apache.httpcomponents:httpclient:4.5.6' + compile 'org.apache.httpcomponents:httpcore:4.4.11' compile 'com.ning:compress-lzf:1.0.4' - compile 'com.google.code.gson:gson:2.8.2' - compile 'org.slf4j:slf4j-api:1.7.25' - compile 'org.slf4j:slf4j-log4j12:1.7.25' + compile 'com.google.code.gson:gson:2.8.5' + compile 'org.slf4j:slf4j-api:1.7.28' + compile 'org.slf4j:slf4j-log4j12:1.7.28' compile 'org.bouncycastle:bcprov-jdk16:1.46' compile 'org.bouncycastle:bcpg-jdk16:1.46' - compile ('com.google.appengine.tools:appengine-gcs-client:0.7') { + compile ('com.google.appengine.tools:appengine-gcs-client:0.8') { exclude module: 'guava' } - compile 'com.google.apis:google-api-services-storage:v1-rev100-1.22.0' - compile 'com.google.http-client:google-http-client-jackson2:1.22.0' - compile 'com.netflix.spectator:spectator-api:0.74.2' + compile 'com.google.apis:google-api-services-storage:v1-rev141-1.25.0' + compile 'com.google.http-client:google-http-client-jackson2:1.28.0' + compile 'com.netflix.spectator:spectator-api:0.96.0' compileOnly 'javax.servlet:javax.servlet-api:3.1.0' - testCompile 'org.jmockit:jmockit:1.31' + testCompile 'org.jmockit:jmockit:1.38' testCompile "org.spockframework:spock-core:1.1-groovy-2.4" - testCompile 'junit:junit:4.12' + testCompile "com.google.truth:truth:1.0.1" + testImplementation 'org.junit.jupiter:junit-jupiter-api:5.3.1' + testRuntimeOnly 'org.junit.jupiter:junit-jupiter-engine:5.3.1' } sourceCompatibility = JavaVersion.VERSION_1_8 diff --git a/buildViaTravis.sh b/buildViaTravis.sh deleted file mode 100755 index 812026d7a..000000000 --- a/buildViaTravis.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash -# This script will build the project. 
- -if [ "$TRAVIS_PULL_REQUEST" != "false" ]; then - echo -e "Build Pull Request #$TRAVIS_PULL_REQUEST => Branch [$TRAVIS_BRANCH]" - ./gradlew build --stacktrace -elif [ "$TRAVIS_PULL_REQUEST" == "false" ] && [ "$TRAVIS_TAG" == "" ]; then - echo -e 'Build Branch with Snapshot => Branch ['$TRAVIS_BRANCH']' - ./gradlew -Prelease.travisci=true -PbintrayUser="${bintrayUser}" -PbintrayKey="${bintrayKey}" -PsonatypeUsername="${sonatypeUsername}" -PsonatypePassword="${sonatypePassword}" build snapshot --stacktrace -elif [ "$TRAVIS_PULL_REQUEST" == "false" ] && [ "$TRAVIS_TAG" != "" ]; then - echo -e 'Build Branch for Release => Branch ['$TRAVIS_BRANCH'] Tag ['$TRAVIS_TAG']' - case "$TRAVIS_TAG" in - *-rc\.*) - ./gradlew -Prelease.travisci=true -Prelease.useLastTag=true -PbintrayUser="${bintrayUser}" -PbintrayKey="${bintrayKey}" -PsonatypeUsername="${sonatypeUsername}" -PsonatypePassword="${sonatypePassword}" candidate --stacktrace - ;; - *) - ./gradlew -Prelease.travisci=true -Prelease.useLastTag=true -PbintrayUser="${bintrayUser}" -PbintrayKey="${bintrayKey}" -PsonatypeUsername="${sonatypeUsername}" -PsonatypePassword="${sonatypePassword}" final --stacktrace - ;; - esac -else - echo -e 'WARN: Should not be here => Branch ['$TRAVIS_BRANCH'] Tag ['$TRAVIS_TAG'] Pull Request ['$TRAVIS_PULL_REQUEST']' - ./gradlew build --stacktrace -fi diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index 717f03890..68ca99ac4 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -2,4 +2,4 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-4.8.1-bin.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-6.8.3-bin.zip diff --git a/images/logo.jpg b/images/logo.jpg new file mode 100644 index 000000000..30ee04d4f Binary files /dev/null and b/images/logo.jpg differ diff --git a/images/priam.png b/images/priam.png new file mode 100644 index 000000000..b23dc5f09 Binary files /dev/null and b/images/priam.png differ diff --git a/installViaTravis.sh b/installViaTravis.sh deleted file mode 100755 index 06a86291c..000000000 --- a/installViaTravis.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash -# This script will build the project. 
- -if [ "$TRAVIS_PULL_REQUEST" != "false" ]; then - echo -e "Assemble Pull Request #$TRAVIS_PULL_REQUEST => Branch [$TRAVIS_BRANCH]" - ./gradlew assemble --stacktrace -elif [ "$TRAVIS_PULL_REQUEST" == "false" ] && [ "$TRAVIS_TAG" == "" ]; then - echo -e 'Assemble Branch with Snapshot => Branch ['$TRAVIS_BRANCH']' - ./gradlew -Prelease.travisci=true assemble --stacktrace -elif [ "$TRAVIS_PULL_REQUEST" == "false" ] && [ "$TRAVIS_TAG" != "" ]; then - echo -e 'Assemble Branch for Release => Branch ['$TRAVIS_BRANCH'] Tag ['$TRAVIS_TAG']' - ./gradlew -Prelease.travisci=true -Prelease.useLastTag=true assemble --stacktrace -else - echo -e 'WARN: Should not be here => Branch ['$TRAVIS_BRANCH'] Tag ['$TRAVIS_TAG'] Pull Request ['$TRAVIS_PULL_REQUEST']' - ./gradlew assemble --stacktrace -fi diff --git a/priam-cass-extensions/src/main/java/com/netflix/priam/cassandra/extensions/DataFetcher.java b/priam-cass-extensions/src/main/java/com/netflix/priam/cassandra/extensions/DataFetcher.java index 81e6136ba..751d47b07 100644 --- a/priam-cass-extensions/src/main/java/com/netflix/priam/cassandra/extensions/DataFetcher.java +++ b/priam-cass-extensions/src/main/java/com/netflix/priam/cassandra/extensions/DataFetcher.java @@ -16,25 +16,21 @@ */ package com.netflix.priam.cassandra.extensions; +import com.google.common.base.Charsets; import java.io.ByteArrayOutputStream; import java.io.DataInputStream; import java.io.FilterInputStream; import java.net.HttpURLConnection; import java.net.URL; - -import com.google.common.base.Charsets; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -public class DataFetcher -{ +public class DataFetcher { private static final Logger logger = LoggerFactory.getLogger(DataFetcher.class); - public static String fetchData(String url) - { + public static String fetchData(String url) { DataInputStream responseStream = null; - try - { + try { HttpURLConnection conn = (HttpURLConnection) new URL(url).openConnection(); conn.setConnectTimeout(1000); conn.setReadTimeout(10000); @@ -46,29 +42,19 @@ public static String fetchData(String url) ByteArrayOutputStream bos = new ByteArrayOutputStream(); responseStream = new DataInputStream((FilterInputStream) conn.getContent()); int c = 0; - while ((c = responseStream.read(b, 0, b.length)) != -1) - bos.write(b, 0, c); + while ((c = responseStream.read(b, 0, b.length)) != -1) bos.write(b, 0, c); String return_ = new String(bos.toByteArray(), Charsets.UTF_8); logger.info("Calling URL API: {} returns: {}", url, return_); conn.disconnect(); return return_; - } - catch (Exception ex) - { + } catch (Exception ex) { throw new RuntimeException(ex); - } - finally - { - try - { - if(responseStream != null) - responseStream.close(); - } - catch (Exception e) - { + } finally { + try { + if (responseStream != null) responseStream.close(); + } catch (Exception e) { logger.warn("Failed to close response stream from priam", e); } } } - } diff --git a/priam-cass-extensions/src/main/java/com/netflix/priam/cassandra/extensions/NFSeedProvider.java b/priam-cass-extensions/src/main/java/com/netflix/priam/cassandra/extensions/NFSeedProvider.java index 784b5d2c7..0f5b7fc2c 100644 --- a/priam-cass-extensions/src/main/java/com/netflix/priam/cassandra/extensions/NFSeedProvider.java +++ b/priam-cass-extensions/src/main/java/com/netflix/priam/cassandra/extensions/NFSeedProvider.java @@ -16,37 +16,29 @@ */ package com.netflix.priam.cassandra.extensions; -import java.net.InetAddress; import java.util.ArrayList; import java.util.List; import java.util.Map; - +import 
org.apache.cassandra.locator.InetAddressAndPort; import org.apache.cassandra.locator.SeedProvider; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -/** - * Retrieves the list of seeds from Priam. - */ -public class NFSeedProvider implements SeedProvider -{ +/** Retrieves the list of seeds from Priam. */ +public class NFSeedProvider implements SeedProvider { private static final Logger logger = LoggerFactory.getLogger(NFSeedProvider.class); - public NFSeedProvider(Map<String, String> args) - { } + public NFSeedProvider(Map<String, String> args) {} @Override - public List<InetAddress> getSeeds() - { - List<InetAddress> seeds = new ArrayList<InetAddress>(); - try - { - String priamSeeds = DataFetcher.fetchData("http://127.0.0.1:8080/Priam/REST/v1/cassconfig/get_seeds"); - for (String seed : priamSeeds.split(",")) - seeds.add(InetAddress.getByName(seed)); - } - catch (Exception e) - { + public List<InetAddressAndPort> getSeeds() { + List<InetAddressAndPort> seeds = new ArrayList<>(); + try { + String priamSeeds = + DataFetcher.fetchData( + "http://127.0.0.1:8080/Priam/REST/v1/cassconfig/get_seeds"); + for (String seed : priamSeeds.split(",")) seeds.add(InetAddressAndPort.getByName(seed)); + } catch (Exception e) { logger.error("Failed to load seed data", e); } return seeds; diff --git a/priam-cass-extensions/src/main/java/com/netflix/priam/cassandra/extensions/PriamStartupAgent.java b/priam-cass-extensions/src/main/java/com/netflix/priam/cassandra/extensions/PriamStartupAgent.java index ad644cb14..8a935a097 100644 --- a/priam-cass-extensions/src/main/java/com/netflix/priam/cassandra/extensions/PriamStartupAgent.java +++ b/priam-cass-extensions/src/main/java/com/netflix/priam/cassandra/extensions/PriamStartupAgent.java @@ -16,91 +16,85 @@ */ package com.netflix.priam.cassandra.extensions; +import java.lang.instrument.Instrumentation; +import java.util.Iterator; import org.apache.cassandra.utils.FBUtilities; import org.apache.commons.lang3.StringUtils; import org.json.simple.JSONObject; import org.json.simple.parser.JSONParser; -import java.lang.instrument.Instrumentation; -import java.util.Iterator; - - /** - * A PreMain class - * to run inside of the cassandra process. Contacts Priam for essential cassandra startup information - * like token and seeds. + * A PreMain + * class to run inside of the cassandra process. Contacts Priam for essential cassandra startup + * information like token and seeds.
*/ -public class PriamStartupAgent -{ - public static String REPLACED_ADDRESS_MIN_VER = "1.2.11"; - public static void premain(String agentArgs, Instrumentation inst) - { +public class PriamStartupAgent { + public static String REPLACED_ADDRESS_MIN_VER = "1.2.11"; + + public static void premain(String agentArgs, Instrumentation inst) { PriamStartupAgent agent = new PriamStartupAgent(); agent.setPriamProperties(); } - private void setPriamProperties() - { + private void setPriamProperties() { String token = null; String seeds = null; boolean isReplace = false; String replacedIp = ""; String extraEnvParams = null; - - while (true) - { - try - { - token = DataFetcher.fetchData("http://127.0.0.1:8080/Priam/REST/v1/cassconfig/get_token"); - seeds = DataFetcher.fetchData("http://127.0.0.1:8080/Priam/REST/v1/cassconfig/get_seeds"); - isReplace = Boolean.parseBoolean(DataFetcher.fetchData("http://127.0.0.1:8080/Priam/REST/v1/cassconfig/is_replace_token")); - replacedIp = DataFetcher.fetchData("http://127.0.0.1:8080/Priam/REST/v1/cassconfig/get_replaced_ip"); - extraEnvParams = DataFetcher.fetchData("http://127.0.0.1:8080/Priam/REST/v1/cassconfig/get_extra_env_params"); - } - catch (Exception e) - { - System.out.println("Failed to obtain startup data from priam, can not start yet. will retry shortly"); + while (true) { + try { + token = + DataFetcher.fetchData( + "http://127.0.0.1:8080/Priam/REST/v1/cassconfig/get_token"); + seeds = + DataFetcher.fetchData( + "http://127.0.0.1:8080/Priam/REST/v1/cassconfig/get_seeds"); + isReplace = + Boolean.parseBoolean( + DataFetcher.fetchData( + "http://127.0.0.1:8080/Priam/REST/v1/cassconfig/is_replace_token")); + replacedIp = + DataFetcher.fetchData( + "http://127.0.0.1:8080/Priam/REST/v1/cassconfig/get_replaced_ip"); + extraEnvParams = + DataFetcher.fetchData( + "http://127.0.0.1:8080/Priam/REST/v1/cassconfig/get_extra_env_params"); + + } catch (Exception e) { + System.out.println( + "Failed to obtain startup data from priam, can not start yet. will retry shortly"); e.printStackTrace(); } - - if (token != null && seeds != null) - break; - try - { + + if (token != null && seeds != null) break; + try { Thread.sleep(5 * 1000); - } - catch (InterruptedException e1) - { + } catch (InterruptedException e1) { // do nothing. 
} } - + System.setProperty("cassandra.initial_token", token); setExtraEnvParams(extraEnvParams); - if (isReplace) - { - System.out.println("Detect cassandra version : " + FBUtilities.getReleaseVersionString()); - if (FBUtilities.getReleaseVersionString().compareTo(REPLACED_ADDRESS_MIN_VER) < 0) - { - System.setProperty("cassandra.replace_token", token); - } else - { - System.setProperty("cassandra.replace_address", replacedIp); - } + if (isReplace) { + System.out.println( + "Detect cassandra version : " + FBUtilities.getReleaseVersionString()); + System.setProperty("cassandra.replace_address_first_boot", replacedIp); } - } - private void setExtraEnvParams(String extraEnvParams) { + private void setExtraEnvParams(String extraEnvParams) { try { if (null != extraEnvParams && extraEnvParams.length() > 0) { JSONParser parser = new JSONParser(); Object obj = parser.parse(extraEnvParams); JSONObject jsonObj = (JSONObject) obj; - if(jsonObj.size()>0) { + if (jsonObj.size() > 0) { for (Iterator iterator = jsonObj.keySet().iterator(); iterator.hasNext(); ) { String key = (String) iterator.next(); String val = (String) jsonObj.get(key); @@ -110,12 +104,12 @@ private void setExtraEnvParams(String extraEnvParams) { } } } - } - catch (Exception e) - { - System.out.println("Failed to parse extra env params: "+extraEnvParams+". However, ignoring the exception."); + } catch (Exception e) { + System.out.println( + "Failed to parse extra env params: " + + extraEnvParams + + ". However, ignoring the exception."); e.printStackTrace(); } } - } diff --git a/priam/build.gradle b/priam/build.gradle index 723f3d03d..bbafaa0aa 100644 --- a/priam/build.gradle +++ b/priam/build.gradle @@ -1 +1,8 @@ -apply plugin: 'groovy' \ No newline at end of file +apply plugin: 'groovy' + +/** + * This is from https://jmockit.github.io/tutorial/Introduction.html#runningTests + */ +test { + jvmArgs "-javaagent:${classpath.find { it.name.contains("jmockit") }.absolutePath}" +} diff --git a/priam/src/main/java/com/netflix/priam/PriamServer.java b/priam/src/main/java/com/netflix/priam/PriamServer.java index 7840ff649..36c8f5103 100644 --- a/priam/src/main/java/com/netflix/priam/PriamServer.java +++ b/priam/src/main/java/com/netflix/priam/PriamServer.java @@ -18,153 +18,133 @@ import com.google.inject.Inject; import com.google.inject.Singleton; -import com.netflix.priam.aws.UpdateCleanupPolicy; -import com.netflix.priam.aws.UpdateSecuritySettings; -import com.netflix.priam.backup.CommitLogBackupTask; -import com.netflix.priam.backup.IncrementalBackup; -import com.netflix.priam.backup.SnapshotBackup; -import com.netflix.priam.backup.parallel.IncrementalBackupProducer; -import com.netflix.priam.cluster.management.Compaction; -import com.netflix.priam.cluster.management.Flush; -import com.netflix.priam.cluster.management.IClusterManagement; -import com.netflix.priam.config.IBackupRestoreConfig; +import com.netflix.priam.backup.BackupService; +import com.netflix.priam.backupv2.BackupV2Service; +import com.netflix.priam.cluster.management.ClusterManagementService; import com.netflix.priam.config.IConfiguration; +import com.netflix.priam.config.PriamConfigurationPersister; import com.netflix.priam.defaultimpl.ICassandraProcess; +import com.netflix.priam.defaultimpl.IService; +import com.netflix.priam.health.CassandraMonitor; import com.netflix.priam.identity.InstanceIdentity; import com.netflix.priam.restore.RestoreContext; import com.netflix.priam.scheduler.PriamScheduler; -import com.netflix.priam.scheduler.TaskTimer; -import 
com.netflix.priam.services.SnapshotMetaService; -import com.netflix.priam.tuner.TuneCassandra; -import com.netflix.priam.utils.CassandraMonitor; +import com.netflix.priam.tuner.CassandraTunerService; import com.netflix.priam.utils.Sleeper; -import org.apache.commons.collections4.CollectionUtils; +import com.netflix.priam.utils.SystemUtils; +import java.io.IOException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -/** - * Start all tasks here - Property update task - Backup task - Restore task - - * Incremental backup - */ +/** Start all tasks here - Property update task - Backup task - Restore task - Incremental backup */ @Singleton -public class PriamServer { +public class PriamServer implements IService { private final PriamScheduler scheduler; private final IConfiguration config; - private final IBackupRestoreConfig backupRestoreConfig; - private final InstanceIdentity id; + private final InstanceIdentity instanceIdentity; private final Sleeper sleeper; private final ICassandraProcess cassProcess; private final RestoreContext restoreContext; + private final IService backupV2Service; + private final IService backupService; + private final IService cassandraTunerService; + private final IService clusterManagementService; private static final int CASSANDRA_MONITORING_INITIAL_DELAY = 10; private static final Logger logger = LoggerFactory.getLogger(PriamServer.class); @Inject - public PriamServer(IConfiguration config, IBackupRestoreConfig backupRestoreConfig, PriamScheduler scheduler, InstanceIdentity id, Sleeper sleeper, ICassandraProcess cassProcess, RestoreContext restoreContext) { + public PriamServer( + IConfiguration config, + PriamScheduler scheduler, + InstanceIdentity id, + Sleeper sleeper, + ICassandraProcess cassProcess, + RestoreContext restoreContext, + BackupService backupService, + BackupV2Service backupV2Service, + CassandraTunerService cassandraTunerService, + ClusterManagementService clusterManagementService) { this.config = config; - this.backupRestoreConfig = backupRestoreConfig; this.scheduler = scheduler; - this.id = id; + this.instanceIdentity = id; this.sleeper = sleeper; this.cassProcess = cassProcess; this.restoreContext = restoreContext; + this.backupService = backupService; + this.backupV2Service = backupV2Service; + this.cassandraTunerService = cassandraTunerService; + this.clusterManagementService = clusterManagementService; } - public void initialize() throws Exception { - if (id.getInstance().isOutOfService()) - return; - - // start to schedule jobs - scheduler.start(); - - // update security settings. 
- if (config.isMultiDC()) { - scheduler.runTaskNow(UpdateSecuritySettings.class); - // sleep for 150 sec if this is a new node with new IP for SG to be updated by other seed nodes - if (id.isReplace() || id.isTokenPregenerated()) - sleeper.sleep(150 * 1000); - else if (UpdateSecuritySettings.firstTimeUpdated) - sleeper.sleep(60 * 1000); - - scheduler.addTask(UpdateSecuritySettings.JOBNAME, UpdateSecuritySettings.class, UpdateSecuritySettings.getTimer(id)); - } + private void createDirectories() throws IOException { + SystemUtils.createDirs(config.getBackupCommitLogLocation()); + SystemUtils.createDirs(config.getCommitLogLocation()); + SystemUtils.createDirs(config.getCacheLocation()); + SystemUtils.createDirs(config.getDataFileLocation()); + SystemUtils.createDirs(config.getLogDirLocation()); + SystemUtils.createDirs(config.getHintsLocation()); + } - // Run the task to tune Cassandra - scheduler.runTaskNow(TuneCassandra.class); - - // Start the snapshot backup schedule - Always run this. (If you want to - // set it off, set backup hour to -1) or set backup cron to "-1" - if (SnapshotBackup.getTimer(config) != null && (CollectionUtils.isEmpty(config.getBackupRacs()) || config.getBackupRacs().contains(config.getRac()))) { - scheduler.addTask(SnapshotBackup.JOBNAME, SnapshotBackup.class, SnapshotBackup.getTimer(config)); - - // Start the Incremental backup schedule if enabled - if (config.isIncrBackup()) { - if (!config.isIncrBackupParallelEnabled()) { - scheduler.addTask(IncrementalBackup.JOBNAME, IncrementalBackup.class, IncrementalBackup.getTimer()); - logger.info("Added incremental synchronous bkup"); - } else { - scheduler.addTask(IncrementalBackupProducer.JOBNAME, IncrementalBackupProducer.class, IncrementalBackupProducer.getTimer()); - logger.info("Added incremental async-synchronous bkup, next fired time: {}", IncrementalBackupProducer.getTimer().getTrigger().getNextFireTime()); - } - } + @Override + public void scheduleService() throws Exception { + // Create all the required directories for priam and Cassandra. + createDirectories(); - } + // Do not start Priam if you are out of service. + if (instanceIdentity.getInstance().isOutOfService()) return; - if (config.isBackingUpCommitLogs()) { - scheduler.addTask(CommitLogBackupTask.JOBNAME, CommitLogBackupTask.class, CommitLogBackupTask.getTimer(config)); - } + // start to schedule jobs + scheduler.start(); + // Set up cassandra tuning. + cassandraTunerService.scheduleService(); // Determine if we need to restore from backup else start cassandra. - if (restoreContext.isRestoreEnabled()){ + if (restoreContext.isRestoreEnabled()) { restoreContext.restore(); - } else { //no restores needed + } else { // no restores needed logger.info("No restore needed, task not scheduled"); - if (!config.doesCassandraStartManually()) - cassProcess.start(true); // Start cassandra. + if (!config.doesCassandraStartManually()) cassProcess.start(true); // Start cassandra. 
else - logger.info("config.doesCassandraStartManually() is set to True, hence Cassandra needs to be started manually ..."); + logger.info( + "config.doesCassandraStartManually() is set to True, hence Cassandra needs to be started manually ..."); } - /* * Run the delayed task (after 10 seconds) to Monitor Cassandra - * If Restore option is chosen, then Running Cassandra instance is stopped + * If Restore option is chosen, then Running Cassandra instance is stopped * Hence waiting for Cassandra to stop */ - scheduler.addTaskWithDelay(CassandraMonitor.JOBNAME, CassandraMonitor.class, CassandraMonitor.getTimer(), CASSANDRA_MONITORING_INITIAL_DELAY); - - - //Set cleanup - scheduler.addTask(UpdateCleanupPolicy.JOBNAME, UpdateCleanupPolicy.class, UpdateCleanupPolicy.getTimer()); - - //Set up nodetool flush task - TaskTimer flushTaskTimer = Flush.getTimer(config); - if (flushTaskTimer != null) { - scheduler.addTask(IClusterManagement.Task.FLUSH.name(), Flush.class, flushTaskTimer); - logger.info("Added nodetool flush task."); - } - - //Set up compaction task - TaskTimer compactionTimer = Compaction.getTimer(config); - if (compactionTimer != null) { - scheduler.addTask(IClusterManagement.Task.COMPACTION.name(), Compaction.class, compactionTimer); - logger.info("Added compaction task."); - } - - //Set up the SnapshotService - setUpSnapshotService(); + scheduler.addTaskWithDelay( + CassandraMonitor.JOBNAME, + CassandraMonitor.class, + CassandraMonitor.getTimer(), + CASSANDRA_MONITORING_INITIAL_DELAY); + + // Set up management services like flush, compactions etc. + clusterManagementService.scheduleService(); + + // Set up the background configuration dumping thread + scheduleTask( + scheduler, + PriamConfigurationPersister.class, + PriamConfigurationPersister.getTimer(config)); + + // Set up V1 Snapshot Service + backupService.scheduleService(); + + // Set up V2 Snapshot Service + backupV2Service.scheduleService(); } - private void setUpSnapshotService() throws Exception{ - TaskTimer snapshotMetaServiceTimer = SnapshotMetaService.getTimer(backupRestoreConfig); - if (snapshotMetaServiceTimer != null) { - scheduler.addTask(SnapshotMetaService.JOBNAME, SnapshotMetaService.class, snapshotMetaServiceTimer); - logger.info("Added SnapshotMetaService Task."); - } - } + @Override + public void updateServicePre() throws Exception {} + + @Override + public void updateServicePost() throws Exception {} - public InstanceIdentity getId() { - return id; + public InstanceIdentity getInstanceIdentity() { + return instanceIdentity; } public PriamScheduler getScheduler() { @@ -174,5 +154,4 @@ public PriamScheduler getScheduler() { public IConfiguration getConfiguration() { return config; } - -} \ No newline at end of file +} diff --git a/priam/src/main/java/com/netflix/priam/aws/AWSMembership.java b/priam/src/main/java/com/netflix/priam/aws/AWSMembership.java index 66c4226e4..f9421aabe 100644 --- a/priam/src/main/java/com/netflix/priam/aws/AWSMembership.java +++ b/priam/src/main/java/com/netflix/priam/aws/AWSMembership.java @@ -17,85 +17,93 @@ package com.netflix.priam.aws; import com.amazonaws.services.autoscaling.AmazonAutoScaling; -import com.amazonaws.services.autoscaling.AmazonAutoScalingClient; +import com.amazonaws.services.autoscaling.AmazonAutoScalingClientBuilder; import com.amazonaws.services.autoscaling.model.*; import com.amazonaws.services.autoscaling.model.Instance; import com.amazonaws.services.ec2.AmazonEC2; -import com.amazonaws.services.ec2.AmazonEC2Client; +import 
com.amazonaws.services.ec2.AmazonEC2ClientBuilder; import com.amazonaws.services.ec2.model.*; import com.amazonaws.services.ec2.model.Filter; -import com.google.common.collect.Lists; +import com.google.common.collect.ImmutableSet; import com.google.inject.Inject; import com.google.inject.name.Named; import com.netflix.priam.config.IConfiguration; import com.netflix.priam.cred.ICredential; import com.netflix.priam.identity.IMembership; -import com.netflix.priam.identity.InstanceEnvIdentity; +import com.netflix.priam.identity.config.InstanceInfo; +import java.util.*; import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.List; - /** - * Class to query amazon ASG for its members to provide - Number of valid nodes - * in the ASG - Number of zones - Methods for adding ACLs for the nodes + * Class to query amazon ASG for its members to provide - Number of valid nodes in the ASG - Number + * of zones - Methods for adding ACLs for the nodes */ public class AWSMembership implements IMembership { private static final Logger logger = LoggerFactory.getLogger(AWSMembership.class); private final IConfiguration config; private final ICredential provider; - private final InstanceEnvIdentity insEnvIdentity; + private final InstanceInfo instanceInfo; private final ICredential crossAccountProvider; @Inject - public AWSMembership(IConfiguration config, ICredential provider, @Named("awsec2roleassumption") ICredential crossAccountProvider, InstanceEnvIdentity insEnvIdentity) { + public AWSMembership( + IConfiguration config, + ICredential provider, + @Named("awsec2roleassumption") ICredential crossAccountProvider, + InstanceInfo instanceInfo) { this.config = config; this.provider = provider; - this.insEnvIdentity = insEnvIdentity; + this.instanceInfo = instanceInfo; this.crossAccountProvider = crossAccountProvider; } @Override - public List getRacMembership() { + public ImmutableSet getRacMembership() { AmazonAutoScaling client = null; try { List asgNames = new ArrayList<>(); - asgNames.add(config.getASGName()); + asgNames.add(instanceInfo.getAutoScalingGroup()); asgNames.addAll(Arrays.asList(config.getSiblingASGNames().split("\\s*,\\s*"))); client = getAutoScalingClient(); - DescribeAutoScalingGroupsRequest asgReq = new DescribeAutoScalingGroupsRequest().withAutoScalingGroupNames(asgNames.toArray(new String[asgNames.size()])); + DescribeAutoScalingGroupsRequest asgReq = + new DescribeAutoScalingGroupsRequest() + .withAutoScalingGroupNames( + asgNames.toArray(new String[asgNames.size()])); DescribeAutoScalingGroupsResult res = client.describeAutoScalingGroups(asgReq); - List instanceIds = Lists.newArrayList(); + ImmutableSet.Builder instanceIds = ImmutableSet.builder(); for (AutoScalingGroup asg : res.getAutoScalingGroups()) { for (Instance ins : asg.getInstances()) - if (!(ins.getLifecycleState().equalsIgnoreCase("Terminating") || ins.getLifecycleState().equalsIgnoreCase("shutting-down") || ins.getLifecycleState() - .equalsIgnoreCase("Terminated"))) + if (!(ins.getLifecycleState().equalsIgnoreCase("Terminating") + || ins.getLifecycleState().equalsIgnoreCase("shutting-down") + || ins.getLifecycleState().equalsIgnoreCase("Terminated"))) instanceIds.add(ins.getInstanceId()); } if (logger.isInfoEnabled()) { - logger.info(String.format("Querying Amazon returned following instance in the RAC: %s, ASGs: %s --> %s", config.getRac(), StringUtils.join(asgNames, ","), 
StringUtils.join(instanceIds, ","))); + logger.info( + String.format( + "Querying Amazon returned following instance in the RAC: %s, ASGs: %s --> %s", + instanceInfo.getRac(), + StringUtils.join(asgNames, ","), + StringUtils.join(instanceIds, ","))); } - return instanceIds; + return instanceIds.build(); } finally { - if (client != null) - client.shutdown(); + if (client != null) client.shutdown(); } } - /** - * Actual membership AWS source of truth... - */ + /** Actual membership AWS source of truth... */ @Override public int getRacMembershipSize() { AmazonAutoScaling client = null; try { client = getAutoScalingClient(); - DescribeAutoScalingGroupsRequest asgReq = new DescribeAutoScalingGroupsRequest().withAutoScalingGroupNames(config.getASGName()); + DescribeAutoScalingGroupsRequest asgReq = + new DescribeAutoScalingGroupsRequest() + .withAutoScalingGroupNames(instanceInfo.getAutoScalingGroup()); DescribeAutoScalingGroupsResult res = client.describeAutoScalingGroups(asgReq); int size = 0; for (AutoScalingGroup asg : res.getAutoScalingGroups()) { @@ -104,36 +112,41 @@ public int getRacMembershipSize() { logger.info("Query on ASG returning {} instances", size); return size; } finally { - if (client != null) - client.shutdown(); + if (client != null) client.shutdown(); } } @Override - public List getCrossAccountRacMembership() { + public ImmutableSet getCrossAccountRacMembership() { AmazonAutoScaling client = null; try { List asgNames = new ArrayList<>(); - asgNames.add(config.getASGName()); + asgNames.add(instanceInfo.getAutoScalingGroup()); asgNames.addAll(Arrays.asList(config.getSiblingASGNames().split("\\s*,\\s*"))); client = getCrossAccountAutoScalingClient(); - DescribeAutoScalingGroupsRequest asgReq = new DescribeAutoScalingGroupsRequest().withAutoScalingGroupNames(asgNames.toArray(new String[asgNames.size()])); + DescribeAutoScalingGroupsRequest asgReq = + new DescribeAutoScalingGroupsRequest() + .withAutoScalingGroupNames( + asgNames.toArray(new String[asgNames.size()])); DescribeAutoScalingGroupsResult res = client.describeAutoScalingGroups(asgReq); - List instanceIds = Lists.newArrayList(); + ImmutableSet.Builder instanceIds = ImmutableSet.builder(); for (AutoScalingGroup asg : res.getAutoScalingGroups()) { for (Instance ins : asg.getInstances()) - if (!(ins.getLifecycleState().equalsIgnoreCase("Terminating") || ins.getLifecycleState().equalsIgnoreCase("shutting-down") || ins.getLifecycleState() - .equalsIgnoreCase("Terminated"))) + if (!(ins.getLifecycleState().equalsIgnoreCase("Terminating") + || ins.getLifecycleState().equalsIgnoreCase("shutting-down") + || ins.getLifecycleState().equalsIgnoreCase("Terminated"))) instanceIds.add(ins.getInstanceId()); } if (logger.isInfoEnabled()) { - logger.info(String.format("Querying Amazon returned following instance in the cross-account ASG: %s --> %s", config.getRac(), StringUtils.join(instanceIds, ","))); + logger.info( + String.format( + "Querying Amazon returned following instance in the cross-account ASG: %s --> %s", + instanceInfo.getRac(), StringUtils.join(instanceIds, ","))); } - return instanceIds; + return instanceIds.build(); } finally { - if (client != null) - client.shutdown(); + if (client != null) client.shutdown(); } } @@ -142,34 +155,56 @@ public int getRacCount() { return config.getRacs().size(); } + private boolean isClassic() { + return instanceInfo.getInstanceEnvironment() == InstanceInfo.InstanceEnvironment.CLASSIC; + } + /** - * Adding peers' IPs as ingress to the running instance SG. 
The running instance could be in "classic" or "vpc" + * Adding peers' IPs as ingress to the running instance SG. The running instance could be in + * "classic" or "vpc" */ public void addACL(Collection listIPs, int from, int to) { AmazonEC2 client = null; try { client = getEc2Client(); - List ipPermissions = new ArrayList(); - ipPermissions.add(new IpPermission().withFromPort(from).withIpProtocol("tcp").withIpRanges(listIPs).withToPort(to)); - - if (this.insEnvIdentity.isClassic()) { - client.authorizeSecurityGroupIngress(new AuthorizeSecurityGroupIngressRequest(config.getACLGroupName(), ipPermissions)); + List ipPermissions = new ArrayList<>(); + ipPermissions.add( + new IpPermission() + .withFromPort(from) + .withIpProtocol("tcp") + .withIpRanges(listIPs) + .withToPort(to)); + + if (isClassic()) { + client.authorizeSecurityGroupIngress( + new AuthorizeSecurityGroupIngressRequest( + config.getACLGroupName(), ipPermissions)); if (logger.isInfoEnabled()) { logger.info("Done adding ACL to classic: " + StringUtils.join(listIPs, ",")); } } else { - AuthorizeSecurityGroupIngressRequest sgIngressRequest = new AuthorizeSecurityGroupIngressRequest(); - sgIngressRequest.withGroupId(getVpcGoupId()); //fetch SG group id for vpc account of the running instance. - client.authorizeSecurityGroupIngress(sgIngressRequest.withIpPermissions(ipPermissions)); //Adding peers' IPs as ingress to the running instance SG + // Adding peers' IPs as ingress to the running instance SG + AuthorizeSecurityGroupIngressRequest sgIngressRequest = + new AuthorizeSecurityGroupIngressRequest() + .withGroupId(getVpcGoupId()) + .withIpPermissions(ipPermissions); + // fetch SG group id for vpc account of the running instance. + int status = + client.authorizeSecurityGroupIngress(sgIngressRequest) + .getSdkHttpMetadata() + .getHttpStatusCode(); + if (status != 200) { + logger.warn( + "We might have too many ingress rules saw http status {} when updating", + status); + } if (logger.isInfoEnabled()) { logger.info("Done adding ACL to vpc: " + StringUtils.join(listIPs, ",")); } } - } finally { - if (client != null) - client.shutdown(); + if (client != null) client.shutdown(); } } @@ -180,66 +215,84 @@ protected String getVpcGoupId() { AmazonEC2 client = null; try { client = getEc2Client(); - Filter nameFilter = new Filter().withName("group-name").withValues(config.getACLGroupName()); //SG - Filter vpcFilter = new Filter().withName("vpc-id").withValues(config.getVpcId()); + Filter nameFilter = + new Filter().withName("group-name").withValues(config.getACLGroupName()); // SG + Filter vpcFilter = new Filter().withName("vpc-id").withValues(instanceInfo.getVpcId()); - DescribeSecurityGroupsRequest req = new DescribeSecurityGroupsRequest().withFilters(nameFilter, vpcFilter); + DescribeSecurityGroupsRequest req = + new DescribeSecurityGroupsRequest().withFilters(nameFilter, vpcFilter); DescribeSecurityGroupsResult result = client.describeSecurityGroups(req); for (SecurityGroup group : result.getSecurityGroups()) { - logger.debug("got group-id:{} for group-name:{},vpc-id:{}", group.getGroupId(), config.getACLGroupName(), config.getVpcId()); + logger.debug( + "got group-id:{} for group-name:{},vpc-id:{}", + group.getGroupId(), + config.getACLGroupName(), + instanceInfo.getVpcId()); return group.getGroupId(); } - logger.error("unable to get group-id for group-name={} vpc-id={}", config.getACLGroupName(), config.getVpcId()); + logger.error( + "unable to get group-id for group-name={} vpc-id={}", + config.getACLGroupName(), + 
instanceInfo.getVpcId()); return ""; } finally { - if (client != null) - client.shutdown(); + if (client != null) client.shutdown(); } } - /** - * removes a iplist from the SG - */ + /** removes a iplist from the SG */ public void removeACL(Collection listIPs, int from, int to) { AmazonEC2 client = null; try { client = getEc2Client(); - List ipPermissions = new ArrayList(); - ipPermissions.add(new IpPermission().withFromPort(from).withIpProtocol("tcp").withIpRanges(listIPs).withToPort(to)); - - if (this.insEnvIdentity.isClassic()) { - client.revokeSecurityGroupIngress(new RevokeSecurityGroupIngressRequest(config.getACLGroupName(), ipPermissions)); + List ipPermissions = new ArrayList<>(); + ipPermissions.add( + new IpPermission() + .withFromPort(from) + .withIpProtocol("tcp") + .withIpRanges(listIPs) + .withToPort(to)); + + if (isClassic()) { + client.revokeSecurityGroupIngress( + new RevokeSecurityGroupIngressRequest( + config.getACLGroupName(), ipPermissions)); if (logger.isInfoEnabled()) { - logger.info("Done removing from ACL within classic env for running instance: " + StringUtils.join(listIPs, ",")); + logger.info( + "Done removing from ACL within classic env for running instance: " + + StringUtils.join(listIPs, ",")); } } else { RevokeSecurityGroupIngressRequest req = new RevokeSecurityGroupIngressRequest(); - req.withGroupId(getVpcGoupId()); //fetch SG group id for vpc account of the running instance. - client.revokeSecurityGroupIngress(req.withIpPermissions(ipPermissions)); //Adding peers' IPs as ingress to the running instance SG + // fetch SG group id for vpc account of the running instance. + req.withGroupId(getVpcGoupId()); + // Adding peers' IPs as ingress to the running instance SG + client.revokeSecurityGroupIngress(req.withIpPermissions(ipPermissions)); if (logger.isInfoEnabled()) { - logger.info("Done removing from ACL within vpc env for running instance: " + StringUtils.join(listIPs, ",")); + logger.info( + "Done removing from ACL within vpc env for running instance: " + + StringUtils.join(listIPs, ",")); } } - } finally { - if (client != null) - client.shutdown(); + if (client != null) client.shutdown(); } } - /** - * List SG ACL's - */ - public List listACL(int from, int to) { + /** List SG ACL's */ + public ImmutableSet listACL(int from, int to) { AmazonEC2 client = null; try { client = getEc2Client(); - List ipPermissions = new ArrayList(); + ImmutableSet.Builder ipPermissions = ImmutableSet.builder(); - if (this.insEnvIdentity.isClassic()) { + if (isClassic()) { - DescribeSecurityGroupsRequest req = new DescribeSecurityGroupsRequest().withGroupNames(Arrays.asList(config.getACLGroupName())); + DescribeSecurityGroupsRequest req = + new DescribeSecurityGroupsRequest() + .withGroupNames( + Collections.singletonList(config.getACLGroupName())); DescribeSecurityGroupsResult result = client.describeSecurityGroups(req); for (SecurityGroup group : result.getSecurityGroups()) for (IpPermission perm : group.getIpPermissions()) @@ -249,14 +302,18 @@ public List listACL(int from, int to) { logger.debug("Fetch current permissions for classic env of running instance"); } else { - Filter nameFilter = new Filter().withName("group-name").withValues(config.getACLGroupName()); - String vpcid = config.getVpcId(); + Filter nameFilter = + new Filter().withName("group-name").withValues(config.getACLGroupName()); + String vpcid = instanceInfo.getVpcId(); if (vpcid == null || vpcid.isEmpty()) { - throw new IllegalStateException("vpcid is null even though instance is running in vpc."); + throw 
new IllegalStateException( + "vpcid is null even though instance is running in vpc."); } - Filter vpcFilter = new Filter().withName("vpc-id").withValues(vpcid); //only fetch SG for the vpc id of the running instance - DescribeSecurityGroupsRequest req = new DescribeSecurityGroupsRequest().withFilters(nameFilter, vpcFilter); + // only fetch SG for the vpc id of the running instance + Filter vpcFilter = new Filter().withName("vpc-id").withValues(vpcid); + DescribeSecurityGroupsRequest req = + new DescribeSecurityGroupsRequest().withFilters(nameFilter, vpcFilter); DescribeSecurityGroupsResult result = client.describeSecurityGroups(req); for (SecurityGroup group : result.getSecurityGroups()) for (IpPermission perm : group.getIpPermissions()) @@ -266,11 +323,9 @@ public List listACL(int from, int to) { logger.debug("Fetch current permissions for vpc env of running instance"); } - - return ipPermissions; + return ipPermissions.build(); } finally { - if (client != null) - client.shutdown(); + if (client != null) client.shutdown(); } } @@ -279,7 +334,9 @@ public void expandRacMembership(int count) { AmazonAutoScaling client = null; try { client = getAutoScalingClient(); - DescribeAutoScalingGroupsRequest asgReq = new DescribeAutoScalingGroupsRequest().withAutoScalingGroupNames(config.getASGName()); + DescribeAutoScalingGroupsRequest asgReq = + new DescribeAutoScalingGroupsRequest() + .withAutoScalingGroupNames(instanceInfo.getAutoScalingGroup()); DescribeAutoScalingGroupsResult res = client.describeAutoScalingGroups(asgReq); AutoScalingGroup asg = res.getAutoScalingGroups().get(0); UpdateAutoScalingGroupRequest ureq = new UpdateAutoScalingGroupRequest(); @@ -289,26 +346,28 @@ public void expandRacMembership(int count) { ureq.setDesiredCapacity(asg.getMinSize() + 1); client.updateAutoScalingGroup(ureq); } finally { - if (client != null) - client.shutdown(); + if (client != null) client.shutdown(); } } protected AmazonAutoScaling getAutoScalingClient() { - AmazonAutoScaling client = new AmazonAutoScalingClient(provider.getAwsCredentialProvider()); - client.setEndpoint("autoscaling." + config.getDC() + ".amazonaws.com"); - return client; + return AmazonAutoScalingClientBuilder.standard() + .withCredentials(provider.getAwsCredentialProvider()) + .withRegion(instanceInfo.getRegion()) + .build(); } protected AmazonAutoScaling getCrossAccountAutoScalingClient() { - AmazonAutoScaling client = new AmazonAutoScalingClient(crossAccountProvider.getAwsCredentialProvider()); - client.setEndpoint("autoscaling." + config.getDC() + ".amazonaws.com"); - return client; + return AmazonAutoScalingClientBuilder.standard() + .withCredentials(crossAccountProvider.getAwsCredentialProvider()) + .withRegion(instanceInfo.getRegion()) + .build(); } protected AmazonEC2 getEc2Client() { - AmazonEC2 client = new AmazonEC2Client(provider.getAwsCredentialProvider()); - client.setEndpoint("ec2." 
+ config.getDC() + ".amazonaws.com"); - return client; + return AmazonEC2ClientBuilder.standard() + .withCredentials(provider.getAwsCredentialProvider()) + .withRegion(instanceInfo.getRegion()) + .build(); } -} \ No newline at end of file +} diff --git a/priam/src/main/java/com/netflix/priam/aws/DataPart.java b/priam/src/main/java/com/netflix/priam/aws/DataPart.java index 9db9a6e38..e7e36ede6 100644 --- a/priam/src/main/java/com/netflix/priam/aws/DataPart.java +++ b/priam/src/main/java/com/netflix/priam/aws/DataPart.java @@ -18,10 +18,7 @@ import com.netflix.priam.utils.SystemUtils; -/** - * Class for holding part data of a backup file, - * which will be used for multi-part uploading - */ +/** Class for holding part data of a backup file, which will be used for multi-part uploading */ public class DataPart { private final String bucketName; private final String uploadID; diff --git a/priam/src/main/java/com/netflix/priam/aws/RemoteBackupPath.java b/priam/src/main/java/com/netflix/priam/aws/RemoteBackupPath.java new file mode 100644 index 000000000..b3616d10a --- /dev/null +++ b/priam/src/main/java/com/netflix/priam/aws/RemoteBackupPath.java @@ -0,0 +1,242 @@ +/* + * Copyright 2017 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +package com.netflix.priam.aws; + +import com.google.api.client.util.Lists; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; +import com.google.inject.Inject; +import com.netflix.priam.backup.AbstractBackupPath; +import com.netflix.priam.compress.CompressionType; +import com.netflix.priam.config.IConfiguration; +import com.netflix.priam.identity.InstanceIdentity; +import com.netflix.priam.utils.DateUtil; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.time.Instant; +import java.util.Arrays; +import java.util.Date; +import java.util.List; +import java.util.Optional; + +/** + * Represents location of an object on the remote file system. All the objects will be keyed with a + * common prefix (based on configuration, typically environment), name of the cluster and token of + * this instance. + */ +public class RemoteBackupPath extends AbstractBackupPath { + private static final ImmutableSet V2_ONLY_FILE_TYPES = + ImmutableSet.of( + BackupFileType.META_V2, + BackupFileType.SST_V2, + BackupFileType.SECONDARY_INDEX_V2); + + @Inject + public RemoteBackupPath(IConfiguration config, InstanceIdentity factory) { + super(config, factory); + } + + private ImmutableList.Builder getV2Prefix() { + ImmutableList.Builder prefix = ImmutableList.builder(); + prefix.add(baseDir, prependHash(clusterName), token); + return prefix; + } + + /* This will ensure that there is some randomness in the path at the start so that remote file systems + can hash the contents better when we have lot of clusters backing up at the same remote location. 
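The comment above gives the rationale for the hash prefix: remote stores such as S3 partition by key prefix, so leading each cluster's keys with a short hash spreads simultaneous backups across partitions. The round trip is small enough to sketch standalone; note that Java's % keeps the sign of hashCode(), which the patch's removeHash tolerates because it parses everything before the first underscore:

    // Standalone sketch mirroring prependHash/removeHash above (validation omitted).
    public final class HashPrefixDemo {
        static String prependHash(String appName) {
            return String.format("%d_%s", appName.hashCode() % 10000, appName);
        }

        static String removeHash(String appNameWithHash) {
            return appNameWithHash.substring(appNameWithHash.indexOf("_") + 1);
        }

        public static void main(String[] args) {
            String key = prependHash("my_cassandra_cluster"); // e.g. "1234_my_cassandra_cluster"
            System.out.println(key);
            System.out.println(removeHash(key)); // back to "my_cassandra_cluster"
        }
    }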
+ */ + private String prependHash(String appName) { + return String.format("%d_%s", appName.hashCode() % 10000, appName); + } + + private String removeHash(String appNameWithHash) { + int hash = Integer.parseInt(appNameWithHash.substring(0, appNameWithHash.indexOf("_"))); + String appName = appNameWithHash.substring(appNameWithHash.indexOf("_") + 1); + Preconditions.checkArgument( + hash == appName.hashCode() % 10000, + "Prepended hash does not match app name. Should have received: " + + prependHash(appName)); + return appName; + } + + /* + * This method will generate the location for the V2 backups. + * Note that we use epochMillis to sort the directory instead of traditional YYYYMMddHHmm. This will allow greater + * flexibility when doing restores as the s3 list calls with common prefix will have greater chance of match instead + * of traditional way (where it takes lot of s3 list calls when month or year changes). + * Another major difference w.r.t. V1 is having no distinction between SNAP and SST files as we upload SSTables only + * once to remote file system. + */ + private String getV2Location() { + ImmutableList.Builder parts = getV2Prefix(); + // JDK-8177809 truncate to seconds to ensure consistent behavior with our old method of + // getting lastModified time (File::lastModified) in Java 8. + long lastModified = getLastModified().toEpochMilli() / 1_000L * 1_000L; + parts.add(type.toString(), lastModified + ""); + if (BackupFileType.isDataFile(type)) { + parts.add(keyspace, columnFamily); + } + if (type == BackupFileType.SECONDARY_INDEX_V2) { + parts.add(indexDir); + } + parts.add(getCompression().toString(), getEncryption().toString(), fileName); + return toPath(parts.build()).toString(); + } + + private void parseV2Location(Path remotePath) { + Preconditions.checkArgument( + remotePath.getNameCount() >= 8, + String.format("%s has fewer than %d parts", remotePath, 8)); + int index = 0; + baseDir = remotePath.getName(index++).toString(); + clusterName = removeHash(remotePath.getName(index++).toString()); + token = remotePath.getName(index++).toString(); + type = BackupFileType.valueOf(remotePath.getName(index++).toString()); + String lastModified = remotePath.getName(index++).toString(); + setLastModified(Instant.ofEpochMilli(Long.parseLong(lastModified))); + List parts = Lists.newArrayListWithCapacity(4); + if (BackupFileType.isDataFile(type)) { + keyspace = remotePath.getName(index++).toString(); + columnFamily = remotePath.getName(index++).toString(); + parts.add(keyspace); + parts.add(columnFamily); + } + if (type == BackupFileType.SECONDARY_INDEX_V2) { + indexDir = remotePath.getName(index++).toString(); + parts.add(indexDir); + } + setCompression(CompressionType.valueOf(remotePath.getName(index++).toString())); + setEncryption(remotePath.getName(index++).toString()); + fileName = remotePath.getName(index).toString(); + parts.add(fileName); + this.backupFile = + Paths.get(config.getDataFileLocation(), parts.toArray(new String[] {})).toFile(); + } + + private String getV1Location() { + ImmutableList.Builder parts = ImmutableList.builder(); + String timeString = DateUtil.formatyyyyMMddHHmm(time); + parts.add(baseDir, region, clusterName, token, timeString, type.toString()); + if (BackupFileType.isDataFile(type)) { + parts.add(keyspace, columnFamily); + } + parts.add(fileName); + return toPath(parts.build()).toString(); + } + + private Path toPath(ImmutableList parts) { + return Paths.get(parts.get(0), parts.subList(1, parts.size()).toArray(new String[0])); + } + + private void 
parseV1Location(Path remotePath) { + Preconditions.checkArgument( + remotePath.getNameCount() >= 7, + String.format("%s has fewer than %d parts", remotePath, 7)); + parseV1Prefix(remotePath); + time = DateUtil.getDate(remotePath.getName(4).toString()); + type = BackupFileType.valueOf(remotePath.getName(5).toString()); + if (BackupFileType.isDataFile(type)) { + keyspace = remotePath.getName(6).toString(); + columnFamily = remotePath.getName(7).toString(); + } + fileName = remotePath.getName(remotePath.getNameCount() - 1).toString(); + } + + private void parseV1Prefix(Path remotePath) { + Preconditions.checkArgument( + remotePath.getNameCount() >= 4, + String.format("%s needs %d parts to parse prefix", remotePath, 4)); + baseDir = remotePath.getName(0).toString(); + region = remotePath.getName(1).toString(); + clusterName = remotePath.getName(2).toString(); + token = remotePath.getName(3).toString(); + } + + /** + * Format of backup path: 1. For old style backups: + * BASE/REGION/CLUSTER/TOKEN/[SNAPSHOTTIME]/[SST|SNAP|META]/KEYSPACE/COLUMNFAMILY/FILE + * + *
<p>
2. For new style backups (SnapshotMetaService) + * BASE/[cluster_name_hash]_cluster/TOKEN//[META_V2|SST_V2]/KEYSPACE/COLUMNFAMILY/[last_modified_time_ms]/FILE.compression + */ + @Override + public String getRemotePath() { + return V2_ONLY_FILE_TYPES.contains(type) ? getV2Location() : getV1Location(); + } + + @Override + public void parseRemote(String remotePath) { + // Hack to determine type in advance of parsing. Will disappear once v1 is retired + Optional inferredType = + Arrays.stream(BackupFileType.values()) + .filter(bft -> remotePath.contains(PATH_SEP + bft.toString() + PATH_SEP)) + .findAny() + .filter(V2_ONLY_FILE_TYPES::contains); + if (inferredType.isPresent()) { + parseV2Location(Paths.get(remotePath)); + } else { + parseV1Location(Paths.get(remotePath)); + } + } + + @Override + public void parsePartialPrefix(String remoteFilePath) { + parseV1Prefix(Paths.get(remoteFilePath)); + } + + @Override + public String remotePrefix(Date start, Date end, String location) { + return PATH_JOINER.join( + clusterPrefix(location), + instanceIdentity.getInstance().getToken(), + match(start, end)); + } + + @Override + public Path remoteV2Prefix(Path location, BackupFileType fileType) { + if (location.getNameCount() <= 1) { + baseDir = config.getBackupLocation(); + clusterName = config.getAppName(); + } else if (location.getNameCount() >= 3) { + baseDir = location.getName(1).toString(); + clusterName = removeHash(location.getName(2).toString()); + } + token = instanceIdentity.getInstance().getToken(); + ImmutableList.Builder parts = getV2Prefix(); + parts.add(fileType.toString()); + return toPath(parts.build()); + } + + @Override + public String clusterPrefix(String location) { + String[] elements = location.split(String.valueOf(RemoteBackupPath.PATH_SEP)); + Preconditions.checkArgument( + elements.length < 2 || elements.length > 3, + "Path must have fewer than 2 or greater than 3 elements. Saw " + location); + if (elements.length <= 1) { + baseDir = config.getBackupLocation(); + region = instanceIdentity.getInstanceInfo().getRegion(); + clusterName = config.getAppName(); + } else { + baseDir = elements[1]; + region = elements[2]; + clusterName = elements[3]; + } + return PATH_JOINER.join(baseDir, region, clusterName, ""); // "" ensures a trailing "/" + } +} diff --git a/priam/src/main/java/com/netflix/priam/aws/S3BackupPath.java b/priam/src/main/java/com/netflix/priam/aws/S3BackupPath.java deleted file mode 100644 index 292de2082..000000000 --- a/priam/src/main/java/com/netflix/priam/aws/S3BackupPath.java +++ /dev/null @@ -1,146 +0,0 @@ -/* - * Copyright 2017 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ -package com.netflix.priam.aws; - -import com.google.common.collect.Lists; -import com.google.inject.Inject; -import com.netflix.priam.config.IConfiguration; -import com.netflix.priam.backup.AbstractBackupPath; -import com.netflix.priam.identity.InstanceIdentity; - -import java.util.Date; -import java.util.List; - -/** - * Represents an S3 object key - */ -public class S3BackupPath extends AbstractBackupPath { - /* - * Checking if request came from Cassandra 1.0 or 1.1 - * In Cassandra 1.0, Number of path elements = 8 - * In Cassandra 1.1, Number of path elements = 9 - */ - private static final int NUM_PATH_ELEMENTS_CASS_1_0 = 8; - - @Inject - public S3BackupPath(IConfiguration config, InstanceIdentity factory) { - super(config, factory); - } - - /** - * Format of backup path: - * Cassandra 1.0 - * BASE/REGION/CLUSTER/TOKEN/[SNAPSHOTTIME]/[SST|SNP|META]/KEYSPACE/FILE - * Cassandra 1.1 - * BASE/REGION/CLUSTER/TOKEN/[SNAPSHOTTIME]/[SST|SNP|META]/KEYSPACE/COLUMNFAMILY/FILE - */ - @Override - public String getRemotePath() { - StringBuffer buff = new StringBuffer(); - buff.append(baseDir).append(S3BackupPath.PATH_SEP); // Base dir - buff.append(region).append(S3BackupPath.PATH_SEP); - buff.append(clusterName).append(S3BackupPath.PATH_SEP);// Cluster name - buff.append(token).append(S3BackupPath.PATH_SEP); - buff.append(formatDate(time)).append(S3BackupPath.PATH_SEP); - buff.append(type).append(S3BackupPath.PATH_SEP); - if (BackupFileType.isDataFile(type)) { - if (isCassandra1_0) - buff.append(keyspace).append(S3BackupPath.PATH_SEP); - else - buff.append(keyspace).append(S3BackupPath.PATH_SEP).append(columnFamily).append(S3BackupPath.PATH_SEP); - } - buff.append(fileName); - return buff.toString(); - } - - @Override - public void parseRemote(String remoteFilePath) { - String[] elements = remoteFilePath.split(String.valueOf(S3BackupPath.PATH_SEP)); - // parse out things which are empty - List pieces = Lists.newArrayList(); - for (String ele : elements) { - if (ele.equals("")) - continue; - pieces.add(ele); - } - assert pieces.size() >= 7 : "Too few elements in path " + remoteFilePath; - if (pieces.size() == NUM_PATH_ELEMENTS_CASS_1_0) - setCassandra1_0(true); - baseDir = pieces.get(0); - region = pieces.get(1); - clusterName = pieces.get(2); - token = pieces.get(3); - time = parseDate(pieces.get(4)); - type = BackupFileType.valueOf(pieces.get(5)); - if (BackupFileType.isDataFile(type)) { - keyspace = pieces.get(6); - if (!isCassandra1_0) - columnFamily = pieces.get(7); - } - // append the rest - fileName = pieces.get(pieces.size() - 1); - } - - @Override - public void parsePartialPrefix(String remoteFilePath) { - String[] elements = remoteFilePath.split(String.valueOf(S3BackupPath.PATH_SEP)); - // parse out things which are empty - List pieces = Lists.newArrayList(); - for (String ele : elements) { - if (ele.equals("")) - continue; - pieces.add(ele); - } - assert pieces.size() >= 4 : "Too few elements in path " + remoteFilePath; - baseDir = pieces.get(0); - region = pieces.get(1); - clusterName = pieces.get(2); - token = pieces.get(3); - } - - @Override - public String remotePrefix(Date start, Date end, String location) { - StringBuffer buff = new StringBuffer(clusterPrefix(location)); - token = factory.getInstance().getToken(); - buff.append(token).append(S3BackupPath.PATH_SEP); - // match the common characters to prefix. 
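On the match(start, end) call just below: timestamps rendered as yyyyMMddHHmm share a long common prefix whenever start and end are close together, and that shared prefix is what narrows the S3 LIST. A plausible sketch of such a helper (illustrative; not the exact Priam implementation):

    // Illustrative helper; not the exact Priam match() implementation.
    import java.text.SimpleDateFormat;
    import java.util.Date;

    final class PrefixMatch {
        /** Longest common prefix of the two formatted dates, used to narrow S3 listing. */
        static String match(Date start, Date end) {
            SimpleDateFormat fmt = new SimpleDateFormat("yyyyMMddHHmm");
            String s = fmt.format(start);
            String e = fmt.format(end);
            int i = 0;
            while (i < s.length() && s.charAt(i) == e.charAt(i)) i++;
            return s.substring(0, i);
        }
    }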
- buff.append(match(start, end)); - return buff.toString(); - } - - @Override - public String clusterPrefix(String location) { - StringBuffer buff = new StringBuffer(); - String[] elements = location.split(String.valueOf(S3BackupPath.PATH_SEP)); - if (elements.length <= 1) { - baseDir = config.getBackupLocation(); - region = config.getDC(); - clusterName = config.getAppName(); - } else { - assert elements.length >= 4 : "Too few elements in path " + location; - baseDir = elements[1]; - region = elements[2]; - clusterName = elements[3]; - } - buff.append(baseDir).append(S3BackupPath.PATH_SEP); - buff.append(region).append(S3BackupPath.PATH_SEP); - buff.append(clusterName).append(S3BackupPath.PATH_SEP); - - return buff.toString(); - } - -} diff --git a/priam/src/main/java/com/netflix/priam/aws/S3CrossAccountFileSystem.java b/priam/src/main/java/com/netflix/priam/aws/S3CrossAccountFileSystem.java index af058ae95..0676b859a 100755 --- a/priam/src/main/java/com/netflix/priam/aws/S3CrossAccountFileSystem.java +++ b/priam/src/main/java/com/netflix/priam/aws/S3CrossAccountFileSystem.java @@ -1,16 +1,14 @@ /** * Copyright 2017 Netflix, Inc. - *
<p>
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
+ *
+ * <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * <p>http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * <p>
Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.priam.aws; @@ -20,38 +18,43 @@ import com.google.inject.Inject; import com.google.inject.Singleton; import com.google.inject.name.Named; -import com.netflix.priam.config.IConfiguration; import com.netflix.priam.aws.auth.IS3Credential; import com.netflix.priam.backup.IBackupFileSystem; +import com.netflix.priam.config.IConfiguration; +import com.netflix.priam.identity.config.InstanceInfo; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /* * A version of S3FileSystem which allows it api access across different AWS accounts. - * + * * *Note: ideally, this object should extend S3FileSystem but could not be done because: * - S3FileSystem is a singleton and it uses DI. To follow the DI pattern, the best way to get this singleton is via injection. - * - S3FileSystem registers a MBean to JMX which must be only once per JVM. If not, you get + * - S3FileSystem registers a MBean to JMX which must be only once per JVM. If not, you get * java.lang.RuntimeException: javax.management.InstanceAlreadyExistsException: com.priam.aws.S3FileSystemMBean:name=S3FileSystemMBean - * - + * - */ @Singleton public class S3CrossAccountFileSystem { private static final Logger logger = LoggerFactory.getLogger(S3CrossAccountFileSystem.class); private AmazonS3 s3Client; - private S3FileSystem s3fs; - private IConfiguration config; - private IS3Credential s3Credential; + private final S3FileSystem s3fs; + private final IConfiguration config; + private final IS3Credential s3Credential; + private final InstanceInfo instanceInfo; @Inject - public S3CrossAccountFileSystem(@Named("backup") IBackupFileSystem fs, @Named("awss3roleassumption") IS3Credential s3Credential, IConfiguration config) { - + public S3CrossAccountFileSystem( + @Named("backup") IBackupFileSystem fs, + @Named("awss3roleassumption") IS3Credential s3Credential, + IConfiguration config, + InstanceInfo instanceInfo) { this.s3fs = (S3FileSystem) fs; this.config = config; this.s3Credential = s3Credential; - + this.instanceInfo = instanceInfo; } public IBackupFileSystem getBackupFileSystem() { @@ -62,29 +65,30 @@ public AmazonS3 getCrossAcctS3Client() { if (this.s3Client == null) { synchronized (this) { - if (this.s3Client == null) { try { - this.s3Client = AmazonS3Client.builder().withCredentials(s3Credential.getAwsCredentialProvider()).withRegion(config.getDC()).build(); + this.s3Client = + AmazonS3Client.builder() + .withCredentials(s3Credential.getAwsCredentialProvider()) + .withRegion(instanceInfo.getRegion()) + .build(); } catch (Exception e) { - throw new IllegalStateException("Exception in getting handle to s3 client. Msg: " + e.getLocalizedMessage(), e); - + throw new IllegalStateException( + "Exception in getting handle to s3 client. Msg: " + + e.getLocalizedMessage(), + e); } - //Lets leverage the IBackupFileSystem behaviors except we want it to use our amazon S3 client which has cross AWS account api capability. + // Lets leverage the IBackupFileSystem behaviors except we want it to use our + // amazon S3 client which has cross AWS account api capability. 
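A note on getCrossAcctS3Client() above: it lazily builds the client with double-checked locking, but the s3Client field in this class does not appear to be declared volatile, and the textbook-safe form of the idiom requires it. A sketch of the safe shape (field and method names illustrative):

    // Illustrative sketch of volatile double-checked locking; not the patch's code.
    import com.amazonaws.services.s3.AmazonS3;

    abstract class LazyS3Holder {
        // volatile is what makes the double-checked locking below safe under the JMM.
        private volatile AmazonS3 s3Client;

        protected abstract AmazonS3 buildClient();

        AmazonS3 get() {
            AmazonS3 local = s3Client; // single volatile read on the fast path
            if (local == null) {
                synchronized (this) {
                    local = s3Client;
                    if (local == null) {
                        s3Client = local = buildClient();
                    }
                }
            }
            return local;
        }
    }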
this.s3fs.setS3Client(s3Client); - } - } - } - return this.s3Client; } - -} \ No newline at end of file +} diff --git a/priam/src/main/java/com/netflix/priam/aws/S3EncryptedFileSystem.java b/priam/src/main/java/com/netflix/priam/aws/S3EncryptedFileSystem.java index f36a2d3a1..eeeaa2ca4 100755 --- a/priam/src/main/java/com/netflix/priam/aws/S3EncryptedFileSystem.java +++ b/priam/src/main/java/com/netflix/priam/aws/S3EncryptedFileSystem.java @@ -1,16 +1,14 @@ /** * Copyright 2017 Netflix, Inc. - *
<p>
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
+ *
+ * <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * <p>http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * <p>
Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.priam.aws; @@ -25,187 +23,174 @@ import com.google.inject.Provider; import com.google.inject.Singleton; import com.google.inject.name.Named; -import com.netflix.priam.config.IConfiguration; -import com.netflix.priam.cred.ICredential; import com.netflix.priam.backup.AbstractBackupPath; import com.netflix.priam.backup.BackupRestoreException; +import com.netflix.priam.backup.DynamicRateLimiter; import com.netflix.priam.backup.RangeReadInputStream; +import com.netflix.priam.compress.ChunkedStream; import com.netflix.priam.compress.ICompression; +import com.netflix.priam.config.IConfiguration; +import com.netflix.priam.cred.ICredential; import com.netflix.priam.cryptography.IFileCryptography; +import com.netflix.priam.identity.config.InstanceInfo; import com.netflix.priam.merics.BackupMetrics; import com.netflix.priam.notification.BackupNotificationMgr; -import org.apache.commons.io.IOUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.management.MBeanServer; -import javax.management.ObjectName; import java.io.*; -import java.lang.management.ManagementFactory; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.time.Instant; import java.util.Iterator; import java.util.List; -import java.util.concurrent.atomic.AtomicInteger; +import org.apache.commons.io.IOUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -/** - * Implementation of IBackupFileSystem for S3. The upload/download will work with ciphertext. - */ +/** Implementation of IBackupFileSystem for S3. The upload/download will work with ciphertext. */ @Singleton -public class S3EncryptedFileSystem extends S3FileSystemBase implements S3EncryptedFileSystemMBean { +public class S3EncryptedFileSystem extends S3FileSystemBase { private static final Logger logger = LoggerFactory.getLogger(S3EncryptedFileSystem.class); - private AtomicInteger uploadCount = new AtomicInteger(); - private IFileCryptography encryptor; + private final IFileCryptography encryptor; + private final DynamicRateLimiter dynamicRateLimiter; @Inject - public S3EncryptedFileSystem(Provider pathProvider, ICompression compress, final IConfiguration config, ICredential cred - , @Named("filecryptoalgorithm") IFileCryptography fileCryptography - , BackupMetrics backupMetrics, - BackupNotificationMgr backupNotificationMgr - ) { + public S3EncryptedFileSystem( + Provider pathProvider, + ICompression compress, + final IConfiguration config, + ICredential cred, + @Named("filecryptoalgorithm") IFileCryptography fileCryptography, + BackupMetrics backupMetrics, + BackupNotificationMgr backupNotificationMgr, + InstanceInfo instanceInfo, + DynamicRateLimiter dynamicRateLimiter) { super(pathProvider, compress, config, backupMetrics, backupNotificationMgr); this.encryptor = fileCryptography; - - MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); - String mbeanName = ENCRYPTED_FILE_SYSTEM_MBEAN_NAME; - try { - mbs.registerMBean(this, new ObjectName(mbeanName)); - } catch (Exception e) { - throw new RuntimeException("Unable to regiser JMX bean: " + mbeanName + " to JMX server. 
Msg: " + e.getLocalizedMessage(), e); - } - - super.s3Client = AmazonS3Client.builder().withCredentials(cred.getAwsCredentialProvider()).withRegion(config.getDC()).build(); + this.dynamicRateLimiter = dynamicRateLimiter; + super.s3Client = + AmazonS3Client.builder() + .withCredentials(cred.getAwsCredentialProvider()) + .withRegion(instanceInfo.getRegion()) + .build(); } @Override - /* - Note: provides same information as getBytesUploaded() but it's meant for S3FileSystemMBean object types. - */ - public long bytesUploaded() { - return bytesUploaded.get(); - } - - - @Override - public long bytesDownloaded() { - return bytesDownloaded.get(); - } - - @Override - public void downloadFile(AbstractBackupPath path, OutputStream os) throws BackupRestoreException { - try { - - RangeReadInputStream rris = new RangeReadInputStream(s3Client, getPrefix(config), path); - - /* + protected void downloadFileImpl(AbstractBackupPath path, String suffix) + throws BackupRestoreException { + String remotePath = path.getRemotePath(); + Path localPath = Paths.get(path.newRestoreFile().getAbsolutePath() + suffix); + try (OutputStream os = new FileOutputStream(localPath.toFile()); + RangeReadInputStream rris = + new RangeReadInputStream( + s3Client, getShard(), super.getFileSize(remotePath), remotePath)) { + /* * To handle use cases where decompression should be done outside of the download. For example, the file have been compressed and then encrypted. - * Hence, decompressing it here would compromise the decryption. - */ - try { - IOUtils.copyLarge(rris, os); - - } catch (Exception ex) { - - throw new BackupRestoreException("Exception encountered when copying bytes from input to output during download", ex); - - } finally { - IOUtils.closeQuietly(rris); - IOUtils.closeQuietly(os); - } - + * Hence, decompressing it here would compromise the decryption. 
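That comment fixes the layering: uploads compress first and encrypt second, so a restore has to copy the ciphertext verbatim at this stage and run decrypt-then-decompress afterwards. A toy sketch of the restore-side ordering using JDK primitives (AES and GZIP are stand-ins for the pluggable IFileCryptography and ICompression implementations):

    // Toy restore pipeline; AES/GZIP stand in for Priam's pluggable crypto/compression.
    import java.io.InputStream;
    import java.util.zip.GZIPInputStream;
    import javax.crypto.Cipher;
    import javax.crypto.CipherInputStream;
    import javax.crypto.spec.SecretKeySpec;

    final class RestorePipeline {
        /** Restore-side ordering: remote bytes -> decrypt -> decompress -> plaintext stream. */
        static InputStream open(InputStream remote, byte[] key) throws Exception {
            Cipher cipher = Cipher.getInstance("AES"); // key must be a valid AES length, e.g. 16 bytes
            cipher.init(Cipher.DECRYPT_MODE, new SecretKeySpec(key, "AES"));
            // Decrypt first; the compressed layer sits underneath the encryption.
            InputStream decrypted = new CipherInputStream(remote, cipher);
            return new GZIPInputStream(decrypted);
        }
    }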
+ */ + IOUtils.copyLarge(rris, os); } catch (Exception e) { - throw new BackupRestoreException("Exception encountered downloading " + path.getRemotePath() + " from S3 bucket " + getPrefix(config) - + ", Msg: " + e.getMessage(), e); + throw new BackupRestoreException( + "Exception encountered downloading " + + remotePath + + " from S3 bucket " + + getShard() + + ", Msg: " + + e.getMessage(), + e); } } - @Override - public void uploadFile(AbstractBackupPath path, InputStream in, long chunkSize) throws BackupRestoreException { - - InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(config.getBackupPrefix(), path.getRemotePath()); //initialize chunking request to aws - InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest); //Fetch the aws generated upload id for this chunking request - DataPart part = new DataPart(config.getBackupPrefix(), path.getRemotePath(), initResponse.getUploadId()); - List partETags = Lists.newArrayList(); //Metadata on number of parts to be uploaded - - - //== Read chunks from src, compress it, and write to temp file - String compressedFileName = path.newRestoreFile() + ".compressed"; - logger.debug("Compressing {} with chunk size {}", compressedFileName, chunkSize); - File compressedDstFile = null; - FileOutputStream compressedDstFileOs = null; - BufferedOutputStream compressedBos = null; - try { - - compressedDstFile = new File(compressedFileName); - compressedDstFileOs = new FileOutputStream(compressedDstFile); - compressedBos = new BufferedOutputStream(compressedDstFileOs); - - } catch (FileNotFoundException e) { - throw new BackupRestoreException("Not able to find temporary compressed file: " + compressedFileName); - } - - try { - - Iterator compressedChunks = this.compress.compress(in, chunkSize); + protected long uploadFileImpl(AbstractBackupPath path, Instant target) + throws BackupRestoreException { + Path localPath = Paths.get(path.getBackupFile().getAbsolutePath()); + String remotePath = path.getRemotePath(); + + long chunkSize = getChunkSize(localPath); + // initialize chunking request to aws + InitiateMultipartUploadRequest initRequest = + new InitiateMultipartUploadRequest(config.getBackupPrefix(), remotePath); + // Fetch the aws generated upload id for this chunking request + InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest); + DataPart part = + new DataPart(config.getBackupPrefix(), remotePath, initResponse.getUploadId()); + // Metadata on number of parts to be uploaded + List partETags = Lists.newArrayList(); + + // Read chunks from src, compress it, and write to temp file + File compressedDstFile = new File(localPath.toString() + ".compressed"); + if (logger.isDebugEnabled()) + logger.debug( + "Compressing {} with chunk size {}", + compressedDstFile.getAbsolutePath(), + chunkSize); + + try (InputStream in = new FileInputStream(localPath.toFile()); + BufferedOutputStream compressedBos = + new BufferedOutputStream(new FileOutputStream(compressedDstFile))) { + Iterator compressedChunks = + new ChunkedStream(in, chunkSize, path.getCompression()); while (compressedChunks.hasNext()) { byte[] compressedChunk = compressedChunks.next(); compressedBos.write(compressedChunk); } - - } catch (IOException e) { - String message = String.format("Exception in compressing the input data during upload to EncryptedStore Msg: " + e.getMessage()); + } catch (Exception e) { + String message = + "Exception in compressing the input data during upload to EncryptedStore 
Msg: " + + e.getMessage(); logger.error(message, e); throw new BackupRestoreException(message); - } finally { - IOUtils.closeQuietly(in); - IOUtils.closeQuietly(compressedBos); } - //== Read compressed data, encrypt each chunk, upload it to aws - FileInputStream compressedFileIs = null; - BufferedInputStream compressedBis = null; - try { + // == Read compressed data, encrypt each chunk, upload it to aws + try (BufferedInputStream compressedBis = + new BufferedInputStream(new FileInputStream(compressedDstFile))) { + Iterator chunks = this.encryptor.encryptStream(compressedBis, remotePath); - compressedFileIs = new FileInputStream(new File(compressedFileName)); - compressedBis = new BufferedInputStream(compressedFileIs); - Iterator chunks = this.encryptor.encryptStream(compressedBis, path.getRemotePath()); + // identifies this part position in the object we are uploading + int partNum = 0; + long encryptedFileSize = 0; - int partNum = 0; //identifies this part position in the object we are uploading while (chunks.hasNext()) { byte[] chunk = chunks.next(); - rateLimiter.acquire(chunk.length); //throttle upload to endpoint - - DataPart dp = new DataPart(++partNum, chunk, config.getBackupPrefix(), path.getRemotePath(), initResponse.getUploadId()); + // throttle upload to endpoint + rateLimiter.acquire(chunk.length); + dynamicRateLimiter.acquire(path, target, chunk.length); + + DataPart dp = + new DataPart( + ++partNum, + chunk, + config.getBackupPrefix(), + remotePath, + initResponse.getUploadId()); S3PartUploader partUploader = new S3PartUploader(s3Client, dp, partETags); + encryptedFileSize += chunk.length; executor.submit(partUploader); - - bytesUploaded.addAndGet(chunk.length); } executor.sleepTillEmpty(); if (partNum != partETags.size()) { - throw new BackupRestoreException("Number of parts(" + partNum + ") does not match the expected number of uploaded parts(" + partETags.size() + ")"); + throw new BackupRestoreException( + "Number of parts(" + + partNum + + ") does not match the expected number of uploaded parts(" + + partETags.size() + + ")"); } - CompleteMultipartUploadResult resultS3MultiPartUploadComplete = new S3PartUploader(s3Client, part, partETags).completeUpload(); //complete the aws chunking upload by providing to aws the ETag that uniquely identifies the combined object data - checkSuccessfulUpload(resultS3MultiPartUploadComplete, path); - + // complete the aws chunking upload by providing to aws the ETag that uniquely + // identifies the combined object datav + CompleteMultipartUploadResult resultS3MultiPartUploadComplete = + new S3PartUploader(s3Client, part, partETags).completeUpload(); + checkSuccessfulUpload(resultS3MultiPartUploadComplete, localPath); + return encryptedFileSize; } catch (Exception e) { - throw encounterError(path, new S3PartUploader(s3Client, part, partETags), e); + new S3PartUploader(s3Client, part, partETags).abortUpload(); + throw new BackupRestoreException("Error uploading file: " + localPath, e); } finally { - IOUtils.closeQuietly(compressedBis); - if (compressedDstFile.exists()) - compressedDstFile.delete(); + if (compressedDstFile.exists()) compressedDstFile.delete(); } - } - - - @Override - public int getActivecount() { - return executor.getActiveCount(); - } - - -} \ No newline at end of file +} diff --git a/priam/src/main/java/com/netflix/priam/aws/S3EncryptedFileSystemMBean.java b/priam/src/main/java/com/netflix/priam/aws/S3EncryptedFileSystemMBean.java deleted file mode 100755 index a60a61103..000000000 --- 
a/priam/src/main/java/com/netflix/priam/aws/S3EncryptedFileSystemMBean.java +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2017 Netflix, Inc. - *
<p>
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.netflix.priam.aws; - -public interface S3EncryptedFileSystemMBean { - - String ENCRYPTED_FILE_SYSTEM_MBEAN_NAME = "com.priam.aws.S3EncryptedFileSystemMBean:name=S3EncryptedFileSystemMBean"; - - long downloadCount(); - - long uploadCount(); - - int getActivecount(); - - long bytesUploaded(); - - long bytesDownloaded(); -} \ No newline at end of file diff --git a/priam/src/main/java/com/netflix/priam/aws/S3FileSystem.java b/priam/src/main/java/com/netflix/priam/aws/S3FileSystem.java index 42ff0cb7f..ce597c13e 100644 --- a/priam/src/main/java/com/netflix/priam/aws/S3FileSystem.java +++ b/priam/src/main/java/com/netflix/priam/aws/S3FileSystem.java @@ -1,189 +1,227 @@ -/* - * Copyright 2013 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -package com.netflix.priam.aws; - -import com.amazonaws.services.s3.AmazonS3Client; -import com.amazonaws.services.s3.S3ResponseMetadata; -import com.amazonaws.services.s3.model.*; -import com.google.inject.Inject; -import com.google.inject.Provider; -import com.google.inject.Singleton; -import com.google.inject.name.Named; -import com.netflix.priam.config.IConfiguration; -import com.netflix.priam.aws.auth.IS3Credential; -import com.netflix.priam.backup.AbstractBackupPath; -import com.netflix.priam.backup.BackupRestoreException; -import com.netflix.priam.backup.RangeReadInputStream; -import com.netflix.priam.compress.ICompression; -import com.netflix.priam.merics.BackupMetrics; -import com.netflix.priam.notification.BackupNotificationMgr; -import com.netflix.priam.utils.BoundedExponentialRetryCallable; -import org.apache.commons.io.IOUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.management.MBeanServer; -import javax.management.ObjectName; -import java.io.*; -import java.lang.management.ManagementFactory; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; -import java.util.concurrent.atomic.AtomicInteger; - -/** - * Implementation of IBackupFileSystem for S3 - */ -@Singleton -public class S3FileSystem extends S3FileSystemBase implements S3FileSystemMBean { - private static final Logger logger = LoggerFactory.getLogger(S3FileSystem.class); - - @Inject - public S3FileSystem(@Named("awss3roleassumption") IS3Credential cred, Provider pathProvider, - ICompression compress, - final IConfiguration config, - BackupMetrics backupMetrics, - BackupNotificationMgr backupNotificationMgr) { - super(pathProvider, compress, config, backupMetrics, backupNotificationMgr); - - MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); - try { - mbs.registerMBean(this, new ObjectName(MBEAN_NAME)); - } catch 
(Exception e) { - throw new RuntimeException(e); - } - - s3Client = AmazonS3Client.builder().withCredentials(cred.getAwsCredentialProvider()).withRegion(config.getDC()).build(); - } - - @Override - public void downloadFile(AbstractBackupPath path, OutputStream os) throws BackupRestoreException { - try { - RangeReadInputStream rris = new RangeReadInputStream(s3Client, getPrefix(this.config), path); - final long bufSize = MAX_BUFFERED_IN_STREAM_SIZE > path.getSize() ? path.getSize() : MAX_BUFFERED_IN_STREAM_SIZE; - compress.decompressAndClose(new BufferedInputStream(rris, (int) bufSize), os); - } catch (Exception e) { - throw new BackupRestoreException("Exception encountered downloading " + path.getRemotePath() + " from S3 bucket " + getPrefix(config) - + ", Msg: " + e.getMessage(), e); - } - } - - private void uploadMultipart(AbstractBackupPath path, InputStream in, long chunkSize) throws BackupRestoreException { - InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(config.getBackupPrefix(), path.getRemotePath()); - InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest); - DataPart part = new DataPart(config.getBackupPrefix(), path.getRemotePath(), initResponse.getUploadId()); - List partETags = Collections.synchronizedList(new ArrayList()); - - try { - Iterator chunks = compress.compress(in, chunkSize); - // Upload parts. - int partNum = 0; - AtomicInteger partsUploaded = new AtomicInteger(0); - - while (chunks.hasNext()) { - byte[] chunk = chunks.next(); - rateLimiter.acquire(chunk.length); - DataPart dp = new DataPart(++partNum, chunk, config.getBackupPrefix(), path.getRemotePath(), initResponse.getUploadId()); - S3PartUploader partUploader = new S3PartUploader(s3Client, dp, partETags, partsUploaded); - executor.submit(partUploader); - bytesUploaded.addAndGet(chunk.length); - } - executor.sleepTillEmpty(); - logger.info("All chunks uploaded for file {}, num of expected parts:{}, num of actual uploaded parts: {}", path.getFileName(), partNum, partsUploaded.get()); - - if (partNum != partETags.size()) - throw new BackupRestoreException("Number of parts(" + partNum + ") does not match the uploaded parts(" + partETags.size() + ")"); - - CompleteMultipartUploadResult resultS3MultiPartUploadComplete = new S3PartUploader(s3Client, part, partETags).completeUpload(); - checkSuccessfulUpload(resultS3MultiPartUploadComplete, path); - - if (logger.isDebugEnabled()) { - final S3ResponseMetadata responseMetadata = s3Client.getCachedResponseMetadata(initRequest); - final String requestId = responseMetadata.getRequestId(); // "x-amz-request-id" header - final String hostId = responseMetadata.getHostId(); // "x-amz-id-2" header - logger.debug("S3 AWS x-amz-request-id[" + requestId + "], and x-amz-id-2[" + hostId + "]"); - } - - } catch (Exception e) { - throw encounterError(path, new S3PartUploader(s3Client, part, partETags), e); - } finally { - IOUtils.closeQuietly(in); - } - } - - @Override - public void uploadFile(AbstractBackupPath path, InputStream in, long chunkSize) throws BackupRestoreException { - - if (path.getSize() < chunkSize) { - //Upload file without using multipart upload as it will be more efficient. 
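The branch being deleted here (it reappears as uploadFileImpl in the rewritten file further down) encodes a common S3 rule of thumb: below the chunk-size threshold a single PutObject is cheaper than the bookkeeping of a multipart upload. The decision in isolation, against AWS SDK v1 (UploadStrategy is an illustrative name):

    // Sketch against AWS SDK v1; UploadStrategy is an illustrative name.
    import com.amazonaws.services.s3.AmazonS3;
    import com.amazonaws.services.s3.model.ObjectMetadata;
    import com.amazonaws.services.s3.model.PutObjectRequest;
    import java.io.ByteArrayInputStream;

    final class UploadStrategy {
        /** True if the object was small enough to ship with a single PUT. */
        static boolean putIfSmall(AmazonS3 s3, String bucket, String key, byte[] body, long chunkSize) {
            if (body.length >= chunkSize) {
                return false; // caller falls back to initiateMultipartUpload/uploadPart/complete
            }
            ObjectMetadata meta = new ObjectMetadata();
            meta.setContentLength(body.length);
            s3.putObject(new PutObjectRequest(bucket, key, new ByteArrayInputStream(body), meta));
            return true;
        }
    }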
- if (logger.isDebugEnabled()) - logger.debug("Uploading file using put: {}", path.getRemotePath()); - - try (ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream()) { - Iterator chunkedStream = compress.compress(in, chunkSize); - while (chunkedStream.hasNext()) { - byteArrayOutputStream.write(chunkedStream.next()); - } - byte[] chunk = byteArrayOutputStream.toByteArray(); - rateLimiter.acquire(chunk.length); - ObjectMetadata objectMetadata = new ObjectMetadata(); - objectMetadata.setContentLength(chunk.length); - PutObjectRequest putObjectRequest = new PutObjectRequest(config.getBackupPrefix(), path.getRemotePath(), new ByteArrayInputStream(chunk), objectMetadata); - //Retry if failed. - PutObjectResult upload = new BoundedExponentialRetryCallable() { - @Override - public PutObjectResult retriableCall() throws Exception { - return s3Client.putObject(putObjectRequest); - } - }.retriableCall(); - - bytesUploaded.addAndGet(chunk.length); - - if (logger.isDebugEnabled()) - logger.debug("Successfully uploaded file with putObject: {} and etag: {}", path.getRemotePath(), upload.getETag()); - } catch (Exception e) { - throw encounterError(path, e); - } finally { - IOUtils.closeQuietly(in); - } - } else - uploadMultipart(path, in, chunkSize); - } - - - @Override - public int getActivecount() { - return executor.getActiveCount(); - } - - - @Override - /* - Note: provides same information as getBytesUploaded() but it's meant for S3FileSystemMBean object types. - */ - public long bytesUploaded() { - return super.bytesUploaded.get(); - } - - - @Override - public long bytesDownloaded() { - return bytesDownloaded.get(); - } - -} \ No newline at end of file +/* + * Copyright 2013 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ +package com.netflix.priam.aws; + +import com.amazonaws.services.s3.AmazonS3Client; +import com.amazonaws.services.s3.S3ResponseMetadata; +import com.amazonaws.services.s3.model.*; +import com.google.common.base.Preconditions; +import com.google.inject.Inject; +import com.google.inject.Provider; +import com.google.inject.Singleton; +import com.google.inject.name.Named; +import com.netflix.priam.aws.auth.IS3Credential; +import com.netflix.priam.backup.AbstractBackupPath; +import com.netflix.priam.backup.BackupRestoreException; +import com.netflix.priam.backup.DynamicRateLimiter; +import com.netflix.priam.backup.RangeReadInputStream; +import com.netflix.priam.compress.ChunkedStream; +import com.netflix.priam.compress.CompressionType; +import com.netflix.priam.compress.ICompression; +import com.netflix.priam.config.IConfiguration; +import com.netflix.priam.identity.config.InstanceInfo; +import com.netflix.priam.merics.BackupMetrics; +import com.netflix.priam.notification.BackupNotificationMgr; +import com.netflix.priam.utils.BoundedExponentialRetryCallable; +import com.netflix.priam.utils.SystemUtils; +import java.io.*; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.concurrent.atomic.AtomicInteger; +import org.apache.commons.io.IOUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** Implementation of IBackupFileSystem for S3 */ +@Singleton +public class S3FileSystem extends S3FileSystemBase { + private static final Logger logger = LoggerFactory.getLogger(S3FileSystem.class); + private static final long MAX_BUFFER_SIZE = 5L * 1024L * 1024L; + private final DynamicRateLimiter dynamicRateLimiter; + + @Inject + public S3FileSystem( + @Named("awss3roleassumption") IS3Credential cred, + Provider pathProvider, + ICompression compress, + final IConfiguration config, + BackupMetrics backupMetrics, + BackupNotificationMgr backupNotificationMgr, + InstanceInfo instanceInfo, + DynamicRateLimiter dynamicRateLimiter) { + super(pathProvider, compress, config, backupMetrics, backupNotificationMgr); + s3Client = + AmazonS3Client.builder() + .withCredentials(cred.getAwsCredentialProvider()) + .withRegion(instanceInfo.getRegion()) + .build(); + this.dynamicRateLimiter = dynamicRateLimiter; + } + + @Override + protected void downloadFileImpl(AbstractBackupPath path, String suffix) + throws BackupRestoreException { + String remotePath = path.getRemotePath(); + File localFile = new File(path.newRestoreFile().getAbsolutePath() + suffix); + long size = super.getFileSize(remotePath); + final int bufferSize = Math.toIntExact(Math.min(MAX_BUFFER_SIZE, size)); + try (BufferedInputStream is = + new BufferedInputStream( + new RangeReadInputStream(s3Client, getShard(), size, remotePath), + bufferSize); + BufferedOutputStream os = + new BufferedOutputStream(new FileOutputStream(localFile))) { + if (path.getCompression() == CompressionType.NONE) { + IOUtils.copyLarge(is, os); + } else { + compress.decompressAndClose(is, os); + } + } catch (Exception e) { + String err = + String.format( + "Failed to GET %s Bucket: %s Msg: %s", + remotePath, getShard(), e.getMessage()); + throw new BackupRestoreException(err); + } + } + + private ObjectMetadata getObjectMetadata(File file) { + ObjectMetadata ret = new ObjectMetadata(); + long lastModified = file.lastModified(); + + if (lastModified != 0) { + 
ret.addUserMetadata("local-modification-time", Long.toString(lastModified)); + } + + long fileSize = file.length(); + if (fileSize != 0) { + ret.addUserMetadata("local-size", Long.toString(fileSize)); + } + return ret; + } + + private long uploadMultipart(AbstractBackupPath path, Instant target) + throws BackupRestoreException { + Path localPath = Paths.get(path.getBackupFile().getAbsolutePath()); + String remotePath = path.getRemotePath(); + long chunkSize = getChunkSize(localPath); + String prefix = config.getBackupPrefix(); + if (logger.isDebugEnabled()) + logger.debug("Uploading to {}/{} with chunk size {}", prefix, remotePath, chunkSize); + File localFile = localPath.toFile(); + InitiateMultipartUploadRequest initRequest = + new InitiateMultipartUploadRequest(prefix, remotePath) + .withObjectMetadata(getObjectMetadata(localFile)); + String uploadId = s3Client.initiateMultipartUpload(initRequest).getUploadId(); + DataPart part = new DataPart(prefix, remotePath, uploadId); + List partETags = Collections.synchronizedList(new ArrayList<>()); + + try (InputStream in = new FileInputStream(localFile)) { + Iterator chunks = new ChunkedStream(in, chunkSize, path.getCompression()); + int partNum = 0; + AtomicInteger partsPut = new AtomicInteger(0); + long compressedFileSize = 0; + + while (chunks.hasNext()) { + byte[] chunk = chunks.next(); + rateLimiter.acquire(chunk.length); + dynamicRateLimiter.acquire(path, target, chunk.length); + DataPart dp = new DataPart(++partNum, chunk, prefix, remotePath, uploadId); + S3PartUploader partUploader = new S3PartUploader(s3Client, dp, partETags, partsPut); + compressedFileSize += chunk.length; + // TODO: output Future instead, collect them here, wait for all below + executor.submit(partUploader); + } + + executor.sleepTillEmpty(); + logger.info("{} done. part count: {} expected: {}", localFile, partsPut.get(), partNum); + Preconditions.checkState(partNum == partETags.size(), "part count mismatch"); + CompleteMultipartUploadResult resultS3MultiPartUploadComplete = + new S3PartUploader(s3Client, part, partETags).completeUpload(); + checkSuccessfulUpload(resultS3MultiPartUploadComplete, localPath); + + if (logger.isDebugEnabled()) { + final S3ResponseMetadata info = s3Client.getCachedResponseMetadata(initRequest); + logger.debug("Request Id: {}, Host Id: {}", info.getRequestId(), info.getHostId()); + } + + return compressedFileSize; + } catch (Exception e) { + new S3PartUploader(s3Client, part, partETags).abortUpload(); + throw new BackupRestoreException("Error uploading file: " + localPath.toString(), e); + } + } + + protected long uploadFileImpl(AbstractBackupPath path, Instant target) + throws BackupRestoreException { + File localFile = Paths.get(path.getBackupFile().getAbsolutePath()).toFile(); + if (localFile.length() >= config.getBackupChunkSize()) return uploadMultipart(path, target); + byte[] chunk = getFileContents(path); + // C* snapshots may have empty files. That is probably unintentional. 
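The chunk.length > 0 guard just below is load-bearing: Guava's RateLimiter.acquire(int) throws IllegalArgumentException for a non-positive permit count, so an empty snapshot file would otherwise abort the upload. In isolation:

    // Guava RateLimiter demo; the rate value is illustrative.
    import com.google.common.util.concurrent.RateLimiter;

    final class ThrottleGuard {
        private final RateLimiter limiter = RateLimiter.create(10 * 1024 * 1024); // bytes/sec

        void throttle(byte[] chunk) {
            // acquire(0) throws IllegalArgumentException, hence the empty-chunk guard.
            if (chunk.length > 0) {
                limiter.acquire(chunk.length);
            }
        }
    }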
+ if (chunk.length > 0) { + rateLimiter.acquire(chunk.length); + dynamicRateLimiter.acquire(path, target, chunk.length); + } + try { + new BoundedExponentialRetryCallable(1000, 10000, 5) { + @Override + public PutObjectResult retriableCall() { + return s3Client.putObject(generatePut(path, chunk)); + } + }.call(); + } catch (Exception e) { + throw new BackupRestoreException("Error uploading file: " + localFile.getName(), e); + } + return chunk.length; + } + + private PutObjectRequest generatePut(AbstractBackupPath path, byte[] chunk) { + File localFile = Paths.get(path.getBackupFile().getAbsolutePath()).toFile(); + ObjectMetadata metadata = getObjectMetadata(localFile); + metadata.setContentLength(chunk.length); + PutObjectRequest put = + new PutObjectRequest( + config.getBackupPrefix(), + path.getRemotePath(), + new ByteArrayInputStream(chunk), + metadata); + if (config.addMD5ToBackupUploads()) { + put.getMetadata().setContentMD5(SystemUtils.toBase64(SystemUtils.md5(chunk))); + } + return put; + } + + private byte[] getFileContents(AbstractBackupPath path) throws BackupRestoreException { + File localFile = Paths.get(path.getBackupFile().getAbsolutePath()).toFile(); + try (ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); + InputStream in = new BufferedInputStream(new FileInputStream(localFile))) { + Iterator chunks = + new ChunkedStream(in, config.getBackupChunkSize(), path.getCompression()); + while (chunks.hasNext()) { + byteArrayOutputStream.write(chunks.next()); + } + return byteArrayOutputStream.toByteArray(); + } catch (Exception e) { + throw new BackupRestoreException("Error reading file: " + localFile.getName(), e); + } + } +} diff --git a/priam/src/main/java/com/netflix/priam/aws/S3FileSystemBase.java b/priam/src/main/java/com/netflix/priam/aws/S3FileSystemBase.java index 6d651ac53..5703424f2 100755 --- a/priam/src/main/java/com/netflix/priam/aws/S3FileSystemBase.java +++ b/priam/src/main/java/com/netflix/priam/aws/S3FileSystemBase.java @@ -1,377 +1,291 @@ -/** - * Copyright 2017 Netflix, Inc. - *

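For reference, the uploadMultipart() method added above follows the standard AWS SDK v1 multipart sequence: initiate, upload parts, then complete, aborting on any failure so S3 does not retain orphaned parts. A minimal sketch of that sequence, assuming a plain sequential loop and hypothetical bucket/key names (Priam's version adds compression, rate limiting, and a bounded thread pool):

    import com.amazonaws.services.s3.AmazonS3;
    import com.amazonaws.services.s3.AmazonS3ClientBuilder;
    import com.amazonaws.services.s3.model.*;
    import java.io.File;
    import java.util.ArrayList;
    import java.util.List;

    public final class MultipartSketch {
        // Hypothetical names; the 10 MiB part size stands in for Priam's configured chunk size.
        public static void upload(File file, String bucket, String key) {
            AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
            String uploadId =
                    s3.initiateMultipartUpload(new InitiateMultipartUploadRequest(bucket, key))
                            .getUploadId();
            List<PartETag> partETags = new ArrayList<>();
            long partSize = 10L * 1024 * 1024;
            try {
                int partNum = 1;
                for (long offset = 0; offset < file.length(); offset += partSize, partNum++) {
                    UploadPartRequest req =
                            new UploadPartRequest()
                                    .withBucketName(bucket)
                                    .withKey(key)
                                    .withUploadId(uploadId)
                                    .withPartNumber(partNum)
                                    .withFile(file)
                                    .withFileOffset(offset)
                                    .withPartSize(Math.min(partSize, file.length() - offset));
                    // Sequential here; Priam submits each part to an executor instead.
                    partETags.add(s3.uploadPart(req).getPartETag());
                }
                s3.completeMultipartUpload(
                        new CompleteMultipartUploadRequest(bucket, key, uploadId, partETags));
            } catch (Exception e) {
                // Abort so S3 stops billing for the already-uploaded parts.
                s3.abortMultipartUpload(new AbortMultipartUploadRequest(bucket, key, uploadId));
                throw new RuntimeException("multipart upload failed", e);
            }
        }
    }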
- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.netflix.priam.aws; - -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.model.AmazonS3Exception; -import com.amazonaws.services.s3.model.BucketLifecycleConfiguration; -import com.amazonaws.services.s3.model.BucketLifecycleConfiguration.Rule; -import com.amazonaws.services.s3.model.CompleteMultipartUploadResult; -import com.google.common.collect.Lists; -import com.google.common.util.concurrent.RateLimiter; -import com.google.inject.Provider; -import com.netflix.priam.config.IConfiguration; -import com.netflix.priam.backup.AbstractBackupPath; -import com.netflix.priam.backup.BackupRestoreException; -import com.netflix.priam.backup.IBackupFileSystem; -import com.netflix.priam.compress.ICompression; -import com.netflix.priam.merics.BackupMetrics; -import com.netflix.priam.notification.BackupEvent; -import com.netflix.priam.notification.BackupNotificationMgr; -import com.netflix.priam.notification.EventGenerator; -import com.netflix.priam.notification.EventObserver; -import com.netflix.priam.scheduler.BlockingSubmitThreadPoolExecutor; -import org.apache.commons.lang3.StringUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.InputStream; -import java.io.OutputStream; -import java.util.Date; -import java.util.Iterator; -import java.util.List; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicLong; - -public abstract class S3FileSystemBase implements IBackupFileSystem, EventGenerator { - protected static final int MAX_CHUNKS = 10000; - protected static final long MAX_BUFFERED_IN_STREAM_SIZE = 5 * 1024 * 1024; - protected static final long UPLOAD_TIMEOUT = (2 * 60 * 60 * 1000L); - private static final Logger logger = LoggerFactory.getLogger(S3FileSystemBase.class); - //protected AtomicInteger uploadCount = new AtomicInteger(); - protected AtomicLong bytesUploaded = new AtomicLong(); //bytes uploaded per file - //protected AtomicInteger downloadCount = new AtomicInteger(); - protected AtomicLong bytesDownloaded = new AtomicLong(); - protected BackupMetrics backupMetrics; - protected AmazonS3 s3Client; - protected IConfiguration config; - protected Provider pathProvider; - protected ICompression compress; - protected BlockingSubmitThreadPoolExecutor executor; - protected RateLimiter rateLimiter; //a throttling mechanism, we can limit the amount of bytes uploaded to endpoint per second. 
- private final CopyOnWriteArrayList> observers = new CopyOnWriteArrayList<>(); - - public S3FileSystemBase(Provider pathProvider, - ICompression compress, - final IConfiguration config, - BackupMetrics backupMetrics, - BackupNotificationMgr backupNotificationMgr) { - this.pathProvider = pathProvider; - this.compress = compress; - this.config = config; - this.backupMetrics = backupMetrics; - - int threads = config.getMaxBackupUploadThreads(); - LinkedBlockingQueue queue = new LinkedBlockingQueue(threads); - this.executor = new BlockingSubmitThreadPoolExecutor(threads, queue, UPLOAD_TIMEOUT); - - double throttleLimit = config.getUploadThrottle(); - this.rateLimiter = RateLimiter.create(throttleLimit < 1 ? Double.MAX_VALUE : throttleLimit); - this.addObserver(backupNotificationMgr); - } - - public AmazonS3 getS3Client() { - return s3Client; - } - - /* - * A means to change the default handle to the S3 client. - */ - public void setS3Client(AmazonS3 client) { - s3Client = client; - } - - /** - * Get S3 prefix which will be used to locate S3 files - */ - protected String getPrefix(IConfiguration config) { - String prefix; - if (StringUtils.isNotBlank(config.getRestorePrefix())) - prefix = config.getRestorePrefix(); - else - prefix = config.getBackupPrefix(); - - String[] paths = prefix.split(String.valueOf(S3BackupPath.PATH_SEP)); - return paths[0]; - } - - @Override - public void cleanup() { - - AmazonS3 s3Client = getS3Client(); - String clusterPath = pathProvider.get().clusterPrefix(""); - logger.debug("Bucket: {}", config.getBackupPrefix()); - BucketLifecycleConfiguration lifeConfig = s3Client.getBucketLifecycleConfiguration(config.getBackupPrefix()); - logger.debug("Got bucket:{} lifecycle.{}", config.getBackupPrefix(), lifeConfig); - if (lifeConfig == null) { - lifeConfig = new BucketLifecycleConfiguration(); - List rules = Lists.newArrayList(); - lifeConfig.setRules(rules); - } - List rules = lifeConfig.getRules(); - if (updateLifecycleRule(config, rules, clusterPath)) { - if (rules.size() > 0) { - lifeConfig.setRules(rules); - s3Client.setBucketLifecycleConfiguration(config.getBackupPrefix(), lifeConfig); - } else - s3Client.deleteBucketLifecycleConfiguration(config.getBackupPrefix()); - } - - } - - private boolean updateLifecycleRule(IConfiguration config, List rules, String prefix) { - Rule rule = null; - for (BucketLifecycleConfiguration.Rule lcRule : rules) { - if (lcRule.getPrefix().equals(prefix)) { - rule = lcRule; - break; - } - } - if (rule == null && config.getBackupRetentionDays() <= 0) - return false; - if (rule != null && rule.getExpirationInDays() == config.getBackupRetentionDays()) { - logger.info("Cleanup rule already set"); - return false; - } - if (rule == null) { - // Create a new rule - rule = new BucketLifecycleConfiguration.Rule().withExpirationInDays(config.getBackupRetentionDays()).withPrefix(prefix); - rule.setStatus(BucketLifecycleConfiguration.ENABLED); - rule.setId(prefix); - rules.add(rule); - logger.info("Setting cleanup for {} to {} days", rule.getPrefix(), rule.getExpirationInDays()); - } else if (config.getBackupRetentionDays() > 0) { - logger.info("Setting cleanup for {} to {} days", rule.getPrefix(), config.getBackupRetentionDays()); - rule.setExpirationInDays(config.getBackupRetentionDays()); - } else { - logger.info("Removing cleanup rule for {}", rule.getPrefix()); - rules.remove(rule); - } - return true; - } - - /* - @param path - representation of the file uploaded - @param start time of upload, in millisecs - @param completion time of upload, in 
millsecs - */ - private void postProcessingPerFile(AbstractBackupPath path, long startTimeInMilliSecs, long completedTimeInMilliSecs) { - //Publish upload rate for each uploaded file - try { - long sizeInBytes = path.getSize(); - long elapseTimeInMillisecs = completedTimeInMilliSecs - startTimeInMilliSecs; - long elapseTimeInSecs = elapseTimeInMillisecs / 1000; //converting millis to seconds as 1000m in 1 second - long bytesReadPerSec = 0; - Double speedInKBps = 0.0; - if (elapseTimeInSecs > 0 && sizeInBytes > 0) { - bytesReadPerSec = sizeInBytes / elapseTimeInSecs; - speedInKBps = bytesReadPerSec / 1024D; - } else { - bytesReadPerSec = sizeInBytes; //we uploaded the whole file in less than a sec - speedInKBps = (double) sizeInBytes; - } - - logger.info("Upload rate for file: {}" - + ", elapsse time in sec(s): {}" - + ", KB per sec: {}", - path.getFileName(), elapseTimeInSecs, speedInKBps); - backupMetrics.recordUploadRate(sizeInBytes); - } catch (Exception e) { - logger.error("Post processing of file {} failed, not fatal.", path.getFileName(), e); - } - } - - /* - Reinitializtion which should be performed before uploading a file - */ - protected void reinitialize() { - bytesUploaded = new AtomicLong(0); //initialize - } - - /* - @param file uploaded to S3 - @param a list of unique parts uploaded to S3 for file - */ - protected void logDiagnosticInfo(AbstractBackupPath fileUploaded, CompleteMultipartUploadResult res) { - File f = fileUploaded.getBackupFile(); - String fName = f.getAbsolutePath(); - logger.info("Uploaded file: {}, object eTag: {}", fName, res.getETag()); - } - - @Override - public void upload(AbstractBackupPath path, InputStream in) throws BackupRestoreException { - reinitialize(); //perform before file upload - long chunkSize = config.getBackupChunkSize(); - if (path.getSize() > 0) - chunkSize = (path.getSize() / chunkSize >= MAX_CHUNKS) ? 
(path.getSize() / (MAX_CHUNKS - 1)) : chunkSize; //compute the size of each block we will upload to endpoint - - logger.info("Uploading to {}/{} with chunk size {}", config.getBackupPrefix(), path.getRemotePath(), chunkSize); - - long startTime = System.nanoTime(); //initialize for each file upload - notifyEventStart(new BackupEvent(path)); - uploadFile(path, in, chunkSize); - long completedTime = System.nanoTime(); - postProcessingPerFile(path, TimeUnit.NANOSECONDS.toMillis(startTime), TimeUnit.NANOSECONDS.toMillis(completedTime)); - notifyEventSuccess(new BackupEvent(path)); - backupMetrics.incrementValidUploads(); - } - - protected void checkSuccessfulUpload(CompleteMultipartUploadResult resultS3MultiPartUploadComplete, AbstractBackupPath path) throws BackupRestoreException { - if (null != resultS3MultiPartUploadComplete && null != resultS3MultiPartUploadComplete.getETag()) { - String eTagObjectId = resultS3MultiPartUploadComplete.getETag(); //unique id of the whole object - logDiagnosticInfo(path, resultS3MultiPartUploadComplete); - } else { - this.backupMetrics.incrementInvalidUploads(); - throw new BackupRestoreException("Error uploading file as ETag or CompleteMultipartUploadResult is NULL -" + path.getFileName()); - } - } - - - protected BackupRestoreException encounterError(AbstractBackupPath path, S3PartUploader s3PartUploader, Exception e) { - s3PartUploader.abortUpload(); - return encounterError(path, e); - } - - protected BackupRestoreException encounterError(AbstractBackupPath path, Exception e) { - this.backupMetrics.incrementInvalidUploads(); - if (e instanceof AmazonS3Exception) { - AmazonS3Exception a = (AmazonS3Exception) e; - String amazoneErrorCode = a.getErrorCode(); - if (amazoneErrorCode != null && !amazoneErrorCode.isEmpty()) { - if (amazoneErrorCode.equalsIgnoreCase("slowdown")) { - backupMetrics.incrementAwsSlowDownException(1); - logger.warn("Received slow down from AWS when uploading file: {}", path.getFileName()); - } - } - } - - logger.error("Error uploading file {}, a datapart was not uploaded.", path.getFileName(), e); - notifyEventFailure(new BackupEvent(path)); - return new BackupRestoreException("Error uploading file " + path.getFileName(), e); - } - - abstract void uploadFile(AbstractBackupPath path, InputStream in, long chunkSize) throws BackupRestoreException; - - /** - * This method does exactly as other download method.(Supposed to be overridden) - * filePath parameter provides the diskPath of the downloaded file. 
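The chunk-size ternary above (and its simpler replacement, getChunkSize(), later in this diff) exists because S3 caps a multipart upload at 10,000 parts. A sketch of the new calculation with worked numbers; the 9995 limit and 10 MiB default mirror values shown in this diff, while the file sizes are hypothetical examples:

    final class ChunkSizeSketch {
        static long chunkSize(long fileLen, long configuredChunk, int maxChunks) {
            // Grow the part size once fileLen / maxChunks would exceed the configured chunk.
            return Math.max(fileLen / maxChunks, configuredChunk);
        }

        public static void main(String[] args) {
            long mib = 1024L * 1024L;
            // 20 GiB / 9995 parts ~= 2.1 MiB, so the 10 MiB default stands (~2048 parts).
            System.out.println(chunkSize(20L * 1024 * mib, 10 * mib, 9995));
            // 500 GiB / 9995 parts ~= 51.2 MiB, so the part size grows to stay under the cap.
            System.out.println(chunkSize(500L * 1024 * mib, 10 * mib, 9995));
        }
    }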
- * This path can be used to correlate the files which are Streamed In - * during Incremental Restores - */ - @Override - public void download(AbstractBackupPath path, OutputStream os, - String filePath) throws BackupRestoreException { - try { - // Calling original Download method - download(path, os); - } catch (Exception e) { - throw new BackupRestoreException(e.getMessage(), e); - } - - } - - @Override - public void download(AbstractBackupPath path, OutputStream os) throws BackupRestoreException { - logger.info("Downloading {} from S3 bucket {}", path.getRemotePath(), getPrefix(this.config)); - long contentLen = s3Client.getObjectMetadata(getPrefix(config), path.getRemotePath()).getContentLength(); - path.setSize(contentLen); - try { - downloadFile(path, os); - bytesDownloaded.addAndGet(contentLen); - backupMetrics.incrementValidDownloads(); - } catch (BackupRestoreException e) { - backupMetrics.incrementInvalidDownloads(); - throw e; - } - } - - protected abstract void downloadFile(AbstractBackupPath path, OutputStream os) throws BackupRestoreException; - - @Override - public long getBytesUploaded() { - return bytesUploaded.get(); - } - - @Override - public long getAWSSlowDownExceptionCounter() { - return backupMetrics.getAwsSlowDownException(); - } - - public long downloadCount() { - return backupMetrics.getValidDownloads(); - } - - public long uploadCount() { - return backupMetrics.getValidUploads(); - } - - @Override - public void shutdown() { - if (executor != null) - executor.shutdown(); - - } - - @Override - public Iterator listPrefixes(Date date) { - return new S3PrefixIterator(config, pathProvider, s3Client, date); - } - - @Override - public Iterator list(String path, Date start, Date till) { - return new S3FileIterator(pathProvider, s3Client, path, start, till); - } - - - @Override - public final void addObserver(EventObserver observer) { - if (observer == null) - throw new NullPointerException("observer must not be null."); - - observers.addIfAbsent(observer); - } - - @Override - public void removeObserver(EventObserver observer) { - if (observer == null) - throw new NullPointerException("observer must not be null."); - - observers.remove(observer); - } - - @Override - public void notifyEventStart(BackupEvent event) { - observers.forEach(eventObserver -> eventObserver.updateEventStart(event)); - } - - @Override - public void notifyEventSuccess(BackupEvent event) { - observers.forEach(eventObserver -> eventObserver.updateEventSuccess(event)); - } - - @Override - public void notifyEventFailure(BackupEvent event) { - observers.forEach(eventObserver -> eventObserver.updateEventFailure(event)); - } - - @Override - public void notifyEventStop(BackupEvent event) { - observers.forEach(eventObserver -> eventObserver.updateEventStop(event)); - } -} +/** + * Copyright 2017 Netflix, Inc. + * + *

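The rewritten S3FileSystemBase below keeps two Guava RateLimiters: one in bytes per second for upload throttling and one in calls per second for doesObjectExist() probes, both retunable at runtime through configChangeListener(). A minimal sketch of that pattern; the "< 1 disables throttling" convention comes from the diff, while the reconfigure() inputs are hypothetical stand-ins for config values:

    import com.google.common.util.concurrent.RateLimiter;

    public class ThrottleSketch {
        private final RateLimiter uploadBytesPerSec = RateLimiter.create(1);
        private final RateLimiter existCallsPerSec = RateLimiter.create(1);

        void reconfigure(double bytesPerSec, double callsPerSec) {
            // A non-positive setting means "unthrottled".
            uploadBytesPerSec.setRate(bytesPerSec < 1 ? Double.MAX_VALUE : bytesPerSec);
            existCallsPerSec.setRate(callsPerSec < 1 ? Double.MAX_VALUE : callsPerSec);
        }

        void onChunk(byte[] chunk) {
            // Blocks until enough byte-permits accrue; chunk must be non-empty,
            // which the diff guards with its chunk.length > 0 check.
            uploadBytesPerSec.acquire(chunk.length);
        }

        void onExistenceProbe() {
            existCallsPerSec.acquire(); // one permit per S3 existence check
        }
    }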
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + *

http://www.apache.org/licenses/LICENSE-2.0 + * + *

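Further down, cleanup() and updateLifecycleRule() upsert a bucket lifecycle expiration rule keyed by the cluster prefix, reading prefixes through the newer filter/predicate API while staying backward compatible with the legacy prefix field. A condensed sketch of the same upsert, assuming hypothetical bucket, prefix, and retention values and omitting error handling:

    import com.amazonaws.services.s3.AmazonS3;
    import com.amazonaws.services.s3.model.BucketLifecycleConfiguration;
    import com.amazonaws.services.s3.model.lifecycle.LifecycleFilter;
    import com.amazonaws.services.s3.model.lifecycle.LifecyclePrefixPredicate;
    import java.util.ArrayList;
    import java.util.List;

    public final class LifecycleSketch {
        public static void setRetention(AmazonS3 s3, String bucket, String prefix, int days) {
            BucketLifecycleConfiguration cfg = s3.getBucketLifecycleConfiguration(bucket);
            if (cfg == null) cfg = new BucketLifecycleConfiguration().withRules(new ArrayList<>());
            List<BucketLifecycleConfiguration.Rule> rules = new ArrayList<>(cfg.getRules());
            rules.removeIf(r -> prefix.equals(r.getId())); // replace any prior rule for this prefix
            rules.add(
                    new BucketLifecycleConfiguration.Rule()
                            .withId(prefix)
                            .withFilter(new LifecycleFilter(new LifecyclePrefixPredicate(prefix)))
                            .withExpirationInDays(days)
                            .withStatus(BucketLifecycleConfiguration.ENABLED));
            s3.setBucketLifecycleConfiguration(bucket, cfg.withRules(rules));
        }
    }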
Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.priam.aws; + +import com.amazonaws.AmazonClientException; +import com.amazonaws.services.s3.AmazonS3; +import com.amazonaws.services.s3.model.BucketLifecycleConfiguration; +import com.amazonaws.services.s3.model.BucketLifecycleConfiguration.Rule; +import com.amazonaws.services.s3.model.CompleteMultipartUploadResult; +import com.amazonaws.services.s3.model.DeleteObjectsRequest; +import com.amazonaws.services.s3.model.lifecycle.*; +import com.google.common.collect.Lists; +import com.google.common.util.concurrent.RateLimiter; +import com.google.inject.Provider; +import com.netflix.priam.backup.AbstractBackupPath; +import com.netflix.priam.backup.AbstractFileSystem; +import com.netflix.priam.backup.BackupRestoreException; +import com.netflix.priam.compress.ICompression; +import com.netflix.priam.config.IConfiguration; +import com.netflix.priam.merics.BackupMetrics; +import com.netflix.priam.notification.BackupNotificationMgr; +import com.netflix.priam.scheduler.BlockingSubmitThreadPoolExecutor; +import java.nio.file.Path; +import java.util.Iterator; +import java.util.List; +import java.util.Optional; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.stream.Collectors; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public abstract class S3FileSystemBase extends AbstractFileSystem { + private static final int MAX_CHUNKS = 9995; // 10K is AWS limit, minus a small buffer + private static final Logger logger = LoggerFactory.getLogger(S3FileSystemBase.class); + AmazonS3 s3Client; + final IConfiguration config; + final ICompression compress; + final BlockingSubmitThreadPoolExecutor executor; + final RateLimiter rateLimiter; + private final RateLimiter objectExistLimiter; + + S3FileSystemBase( + Provider pathProvider, + ICompression compress, + final IConfiguration config, + BackupMetrics backupMetrics, + BackupNotificationMgr backupNotificationMgr) { + super(config, backupMetrics, backupNotificationMgr, pathProvider); + this.compress = compress; + this.config = config; + + int threads = config.getBackupThreads(); + LinkedBlockingQueue queue = new LinkedBlockingQueue<>(threads); + this.executor = + new BlockingSubmitThreadPoolExecutor(threads, queue, config.getUploadTimeout()); + + // a throttling mechanism, we can limit the amount of bytes uploaded to endpoint per second. + this.rateLimiter = RateLimiter.create(1); + // a throttling mechanism, we can limit the amount of S3 API calls endpoint per second. + this.objectExistLimiter = RateLimiter.create(1); + configChangeListener(); + } + + /* + Call this method to change the configuration in runtime via callback. + */ + public void configChangeListener() { + int objectExistLimit = config.getRemoteFileSystemObjectExistsThrottle(); + objectExistLimiter.setRate(objectExistLimit < 1 ? Double.MAX_VALUE : objectExistLimit); + + double throttleLimit = config.getUploadThrottle(); + rateLimiter.setRate(throttleLimit < 1 ? 
Double.MAX_VALUE : throttleLimit); + + logger.info( + "Updating rateLimiters: s3UploadThrottle: {}, objectExistLimiter: {}", + rateLimiter.getRate(), + objectExistLimiter.getRate()); + } + + private AmazonS3 getS3Client() { + return s3Client; + } + + /* + * A means to change the default handle to the S3 client. + */ + public void setS3Client(AmazonS3 client) { + s3Client = client; + } + + @Override + public void cleanup() { + + AmazonS3 s3Client = getS3Client(); + String clusterPath = pathProvider.get().clusterPrefix(""); + logger.debug("Bucket: {}", config.getBackupPrefix()); + BucketLifecycleConfiguration lifeConfig = + s3Client.getBucketLifecycleConfiguration(config.getBackupPrefix()); + logger.debug("Got bucket:{} lifecycle.{}", config.getBackupPrefix(), lifeConfig); + if (lifeConfig == null) { + lifeConfig = new BucketLifecycleConfiguration(); + List rules = Lists.newArrayList(); + lifeConfig.setRules(rules); + } + + List rules = lifeConfig.getRules(); + + if (updateLifecycleRule(config, rules, clusterPath)) { + if (rules.size() > 0) { + lifeConfig.setRules(rules); + s3Client.setBucketLifecycleConfiguration(config.getBackupPrefix(), lifeConfig); + } else s3Client.deleteBucketLifecycleConfiguration(config.getBackupPrefix()); + } + } + + // Dummy class to get Prefix. - Why oh why AWS you can't give the details!! + private class PrefixVisitor implements LifecyclePredicateVisitor { + String prefix; + + @Override + public void visit(LifecyclePrefixPredicate lifecyclePrefixPredicate) { + prefix = lifecyclePrefixPredicate.getPrefix(); + } + + @Override + public void visit(LifecycleTagPredicate lifecycleTagPredicate) {} + + @Override + public void visit( + LifecycleObjectSizeGreaterThanPredicate lifecycleObjectSizeGreaterThanPredicate) {} + + @Override + public void visit(LifecycleAndOperator lifecycleAndOperator) {} + + @Override + public void visit( + LifecycleObjectSizeLessThanPredicate lifecycleObjectSizeLessThanPredicate) {} + } + + private Optional getBucketLifecycleRule(List rules, String prefix) { + if (rules == null || rules.isEmpty()) return Optional.empty(); + + for (Rule rule : rules) { + String rulePrefix = ""; + if (rule.getFilter() != null) { + PrefixVisitor prefixVisitor = new PrefixVisitor(); + rule.getFilter().getPredicate().accept(prefixVisitor); + rulePrefix = prefixVisitor.prefix; + } else if (rule.getPrefix() != null) { + // Being backwards compatible, here. + rulePrefix = rule.getPrefix(); + } + if (prefix.equalsIgnoreCase(rulePrefix)) { + return Optional.of(rule); + } + } + + return Optional.empty(); + } + + private boolean updateLifecycleRule(IConfiguration config, List rules, String prefix) { + Optional rule = getBucketLifecycleRule(rules, prefix); + // No need to update the rule as it never existed and retention is not set. + if (!rule.isPresent() && config.getBackupRetentionDays() <= 0) return false; + + // Rule not required as retention days is zero or negative. + if (rule.isPresent() && config.getBackupRetentionDays() <= 0) { + logger.warn( + "Removing the rule for backup retention on prefix: {} as retention is set to [{}] days. Only positive values are supported by S3!!", + prefix, + config.getBackupRetentionDays()); + rules.remove(rule.get()); + return true; + } + + // Rule present and is current. 
+ if (rule.isPresent() + && rule.get().getExpirationInDays() == config.getBackupRetentionDays() + && rule.get().getStatus().equalsIgnoreCase(BucketLifecycleConfiguration.ENABLED)) { + logger.info( + "Cleanup rule already set on prefix: {} with retention period: [{}] days", + prefix, + config.getBackupRetentionDays()); + return false; + } + + if (!rule.isPresent()) { + // Create a new rule + rule = Optional.of(new BucketLifecycleConfiguration.Rule()); + rules.add(rule.get()); + } + + rule.get().setStatus(BucketLifecycleConfiguration.ENABLED); + rule.get().setExpirationInDays(config.getBackupRetentionDays()); + rule.get().setFilter(new LifecycleFilter(new LifecyclePrefixPredicate(prefix))); + rule.get().setId(prefix); + logger.info( + "Setting cleanup rule for prefix: {} with retention period: [{}] days", + prefix, + config.getBackupRetentionDays()); + return true; + } + + void checkSuccessfulUpload( + CompleteMultipartUploadResult resultS3MultiPartUploadComplete, Path localPath) + throws BackupRestoreException { + if (null != resultS3MultiPartUploadComplete + && null != resultS3MultiPartUploadComplete.getETag()) { + logger.info( + "Uploaded file: {}, object eTag: {}", + localPath, + resultS3MultiPartUploadComplete.getETag()); + } else { + throw new BackupRestoreException( + "Error uploading file as ETag or CompleteMultipartUploadResult is NULL -" + + localPath); + } + } + + @Override + public long getFileSize(String remotePath) throws BackupRestoreException { + return s3Client.getObjectMetadata(getShard(), remotePath).getContentLength(); + } + + @Override + protected boolean doesRemoteFileExist(Path remotePath) { + objectExistLimiter.acquire(); + boolean exists = false; + try { + exists = s3Client.doesObjectExist(getShard(), remotePath.toString()); + } catch (AmazonClientException ex) { + // No point throwing this exception up. + logger.error( + "Exception while checking existence of object: {}. 
Error: {}", + remotePath, + ex.getMessage()); + } + + return exists; + } + + @Override + public void shutdown() { + if (executor != null) executor.shutdown(); + } + + @Override + public Iterator<String> listFileSystem(String prefix, String delimiter, String marker) { + return new S3Iterator(s3Client, getShard(), prefix, delimiter, marker); + } + + @Override + public void deleteFiles(List<Path> remotePaths) throws BackupRestoreException { + if (remotePaths.isEmpty()) return; + + try { + List<DeleteObjectsRequest.KeyVersion> keys = + remotePaths + .stream() + .map( + remotePath -> + new DeleteObjectsRequest.KeyVersion( + remotePath.toString())) + .collect(Collectors.toList()); + s3Client.deleteObjects( + new DeleteObjectsRequest(getShard()).withKeys(keys).withQuiet(true)); + logger.info("Deleted {} objects from S3", remotePaths.size()); + } catch (Exception e) { + logger.error( + "Error while trying to delete [{}] the objects from S3: {}", + remotePaths.size(), + e.getMessage()); + throw new BackupRestoreException(e + " while trying to delete the objects"); + } + } + + final long getChunkSize(Path path) { + return Math.max(path.toFile().length() / MAX_CHUNKS, config.getBackupChunkSize()); + } +} diff --git a/priam/src/main/java/com/netflix/priam/aws/S3FileIterator.java b/priam/src/main/java/com/netflix/priam/aws/S3Iterator.java similarity index 50% rename from priam/src/main/java/com/netflix/priam/aws/S3FileIterator.java rename to priam/src/main/java/com/netflix/priam/aws/S3Iterator.java index a803216c8..0eaf4052a 100644 --- a/priam/src/main/java/com/netflix/priam/aws/S3FileIterator.java +++ b/priam/src/main/java/com/netflix/priam/aws/S3Iterator.java @@ -1,5 +1,5 @@ /* - * Copyright 2013 Netflix, Inc. + * Copyright 2018 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,6 +14,7 @@ * limitations under the License. * */ + package com.netflix.priam.aws; import com.amazonaws.services.s3.AmazonS3; @@ -21,40 +22,51 @@ import com.amazonaws.services.s3.model.ObjectListing; import com.amazonaws.services.s3.model.S3ObjectSummary; import com.google.common.collect.Lists; -import com.google.inject.Provider; -import com.netflix.priam.backup.AbstractBackupPath; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.Date; import java.util.Iterator; import java.util.List; +import org.apache.commons.lang3.StringUtils; /** - * Iterator representing list of backup files available on S3 + * Iterate over the s3 file system. This is really required to find the manifest file for restore + * and downloading incrementals. Created by aagrawal on 11/30/18.
*/ -public class S3FileIterator implements Iterator { - private static final Logger logger = LoggerFactory.getLogger(S3FileIterator.class); - private final Provider pathProvider; - private final AmazonS3 s3Client; - private final Date start; - private final Date till; - private Iterator iterator; +public class S3Iterator implements Iterator { + private Iterator iterator; private ObjectListing objectListing; + private final AmazonS3 s3Client; + private final String bucket; + private final String prefix; + private final String delimiter; + private final String marker; - public S3FileIterator(Provider pathProvider, AmazonS3 s3Client, String path, Date start, Date till) { - this.start = start; - this.till = till; - this.pathProvider = pathProvider; - ListObjectsRequest listReq = new ListObjectsRequest(); - String[] paths = path.split(String.valueOf(S3BackupPath.PATH_SEP)); - listReq.setBucketName(paths[0]); - listReq.setPrefix(pathProvider.get().remotePrefix(start, till, path)); + public S3Iterator( + AmazonS3 s3Client, String bucket, String prefix, String delimiter, String marker) { this.s3Client = s3Client; - objectListing = s3Client.listObjects(listReq); + this.bucket = bucket; + this.prefix = prefix; + this.delimiter = delimiter; + this.marker = marker; iterator = createIterator(); } + private void initListing() { + ListObjectsRequest listReq = new ListObjectsRequest(); + listReq.setBucketName(bucket); + listReq.setPrefix(prefix); + if (StringUtils.isNotBlank(delimiter)) listReq.setDelimiter(delimiter); + if (StringUtils.isNotBlank(marker)) listReq.setMarker(marker); + objectListing = s3Client.listObjects(listReq); + } + + private Iterator createIterator() { + if (objectListing == null) initListing(); + List temp = Lists.newArrayList(); + for (S3ObjectSummary summary : objectListing.getObjectSummaries()) { + temp.add(summary.getKey()); + } + return temp.iterator(); + } + @Override public boolean hasNext() { if (iterator.hasNext()) { @@ -64,32 +76,12 @@ public boolean hasNext() { objectListing = s3Client.listNextBatchOfObjects(objectListing); iterator = createIterator(); } - } return iterator.hasNext(); } - private Iterator createIterator() { - List temp = Lists.newArrayList(); - for (S3ObjectSummary summary : objectListing.getObjectSummaries()) { - AbstractBackupPath path = pathProvider.get(); - path.parseRemote(summary.getKey()); - logger.debug("New key {} path = {} start: {} end: {} my {}", summary.getKey(), path.getRemotePath(), start, till, path.getTime()); - if ((path.getTime().after(start) && path.getTime().before(till)) || path.getTime().equals(start)) { - temp.add(path); - logger.debug("Added key {}", summary.getKey()); - } - } - return temp.iterator(); - } - @Override - public AbstractBackupPath next() { + public String next() { return iterator.next(); } - - @Override - public void remove() { - throw new IllegalStateException(); - } } diff --git a/priam/src/main/java/com/netflix/priam/aws/S3PartUploader.java b/priam/src/main/java/com/netflix/priam/aws/S3PartUploader.java index 1d7c1250d..5a6a04ae6 100644 --- a/priam/src/main/java/com/netflix/priam/aws/S3PartUploader.java +++ b/priam/src/main/java/com/netflix/priam/aws/S3PartUploader.java @@ -20,40 +20,40 @@ import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.model.*; import com.netflix.priam.backup.BackupRestoreException; -import com.netflix.priam.utils.RetryableCallable; +import com.netflix.priam.utils.BoundedExponentialRetryCallable; import com.netflix.priam.utils.SystemUtils; -import org.slf4j.Logger; 
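The S3Iterator above uses the classic paginated-listing idiom: drain one page of results, then fetch the next page while the listing is truncated. The same loop in a self-contained form, with bucket and prefix as placeholders:

    import com.amazonaws.services.s3.AmazonS3;
    import com.amazonaws.services.s3.model.ListObjectsRequest;
    import com.amazonaws.services.s3.model.ObjectListing;
    import com.amazonaws.services.s3.model.S3ObjectSummary;
    import java.util.ArrayList;
    import java.util.List;

    public final class ListAllKeys {
        public static List<String> list(AmazonS3 s3, String bucket, String prefix) {
            List<String> keys = new ArrayList<>();
            ObjectListing listing =
                    s3.listObjects(new ListObjectsRequest().withBucketName(bucket).withPrefix(prefix));
            while (true) {
                for (S3ObjectSummary summary : listing.getObjectSummaries()) {
                    keys.add(summary.getKey());
                }
                if (!listing.isTruncated()) break;
                listing = s3.listNextBatchOfObjects(listing); // fetch the next page
            }
            return keys;
        }
    }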
-import org.slf4j.LoggerFactory; - import java.io.ByteArrayInputStream; import java.util.List; import java.util.concurrent.atomic.AtomicInteger; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -public class S3PartUploader extends RetryableCallable { +public class S3PartUploader extends BoundedExponentialRetryCallable { private final AmazonS3 client; - private DataPart dataPart; - private List partETags; - private AtomicInteger partsUploaded = null; //num of data parts successfully uploaded + private final DataPart dataPart; + private final List partETags; + private AtomicInteger partsUploaded = null; // num of data parts successfully uploaded private static final Logger logger = LoggerFactory.getLogger(S3PartUploader.class); private static final int MAX_RETRIES = 5; + private static final int DEFAULT_MIN_SLEEP_MS = 200; public S3PartUploader(AmazonS3 client, DataPart dp, List partETags) { - super(MAX_RETRIES, RetryableCallable.DEFAULT_WAIT_TIME); + super(DEFAULT_MIN_SLEEP_MS, BoundedExponentialRetryCallable.MAX_SLEEP, MAX_RETRIES); this.client = client; this.dataPart = dp; this.partETags = partETags; } - public S3PartUploader(AmazonS3 client, DataPart dp, List partETags, AtomicInteger partsUploaded) { - super(MAX_RETRIES, RetryableCallable.DEFAULT_WAIT_TIME); + public S3PartUploader( + AmazonS3 client, DataPart dp, List partETags, AtomicInteger partsUploaded) { + super(DEFAULT_MIN_SLEEP_MS, BoundedExponentialRetryCallable.MAX_SLEEP, MAX_RETRIES); this.client = client; this.dataPart = dp; this.partETags = partETags; this.partsUploaded = partsUploaded; } - private Void uploadPart() throws AmazonClientException, BackupRestoreException { UploadPartRequest req = new UploadPartRequest(); req.setBucketName(dataPart.getBucketName()); @@ -66,27 +66,35 @@ private Void uploadPart() throws AmazonClientException, BackupRestoreException { UploadPartResult res = client.uploadPart(req); PartETag partETag = res.getPartETag(); if (!partETag.getETag().equals(SystemUtils.toHex(dataPart.getMd5()))) - throw new BackupRestoreException("Unable to match MD5 for part " + dataPart.getPartNo()); + throw new BackupRestoreException( + "Unable to match MD5 for part " + dataPart.getPartNo()); partETags.add(partETag); - if (this.partsUploaded != null) - this.partsUploaded.incrementAndGet(); + if (this.partsUploaded != null) this.partsUploaded.incrementAndGet(); return null; } public CompleteMultipartUploadResult completeUpload() throws BackupRestoreException { - CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(dataPart.getBucketName(), dataPart.getS3key(), dataPart.getUploadID(), partETags); + CompleteMultipartUploadRequest compRequest = + new CompleteMultipartUploadRequest( + dataPart.getBucketName(), + dataPart.getS3key(), + dataPart.getUploadID(), + partETags); return client.completeMultipartUpload(compRequest); } // Abort public void abortUpload() { - AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest(dataPart.getBucketName(), dataPart.getS3key(), dataPart.getUploadID()); + AbortMultipartUploadRequest abortRequest = + new AbortMultipartUploadRequest( + dataPart.getBucketName(), dataPart.getS3key(), dataPart.getUploadID()); client.abortMultipartUpload(abortRequest); } @Override public Void retriableCall() throws AmazonClientException, BackupRestoreException { - logger.debug("Picked up part {} size {}", dataPart.getPartNo(), dataPart.getPartData().length); + logger.debug( + "Picked up part {} size {}", dataPart.getPartNo(), dataPart.getPartData().length); 
return uploadPart(); } } diff --git a/priam/src/main/java/com/netflix/priam/aws/S3PrefixIterator.java b/priam/src/main/java/com/netflix/priam/aws/S3PrefixIterator.java deleted file mode 100644 index 649486b33..000000000 --- a/priam/src/main/java/com/netflix/priam/aws/S3PrefixIterator.java +++ /dev/null @@ -1,151 +0,0 @@ -/* - * Copyright 2013 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -package com.netflix.priam.aws; - -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.model.ListObjectsRequest; -import com.amazonaws.services.s3.model.ObjectListing; -import com.google.common.collect.Lists; -import com.google.inject.Inject; -import com.google.inject.Provider; -import com.netflix.priam.config.IConfiguration; -import com.netflix.priam.backup.AbstractBackupPath; -import org.apache.commons.lang3.StringUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.text.SimpleDateFormat; -import java.util.Date; -import java.util.Iterator; -import java.util.List; - -/** - * Class to iterate over prefixes (S3 Common prefixes) upto - * the token element in the path. The abstract path generated by this class - * is partial (does not have all data). - */ -public class S3PrefixIterator implements Iterator { - private static final Logger logger = LoggerFactory.getLogger(S3PrefixIterator.class); - private final IConfiguration config; - private final AmazonS3 s3Client; - private final Provider pathProvider; - private Iterator iterator; - - private String bucket = ""; - private String clusterPath = ""; - private SimpleDateFormat datefmt = new SimpleDateFormat("yyyyMMdd"); - private ObjectListing objectListing = null; - Date date; - - @Inject - public S3PrefixIterator(IConfiguration config, Provider pathProvider, AmazonS3 s3Client, Date date) { - this.config = config; - this.pathProvider = pathProvider; - this.s3Client = s3Client; - this.date = date; - String path = ""; - if (StringUtils.isNotBlank(config.getRestorePrefix())) - path = config.getRestorePrefix(); - else - path = config.getBackupPrefix(); - - String[] paths = path.split(String.valueOf(S3BackupPath.PATH_SEP)); - bucket = paths[0]; - this.clusterPath = remotePrefix(path); - iterator = createIterator(); - } - - private void initListing() { - ListObjectsRequest listReq = new ListObjectsRequest(); - // Get list of tokens - listReq.setBucketName(bucket); - listReq.setPrefix(clusterPath); - listReq.setDelimiter(String.valueOf(AbstractBackupPath.PATH_SEP)); - logger.info("Using cluster prefix for searching tokens: {}", clusterPath); - objectListing = s3Client.listObjects(listReq); - - } - - private Iterator createIterator() { - if (objectListing == null) - initListing(); - List temp = Lists.newArrayList(); - for (String summary : objectListing.getCommonPrefixes()) { - if (pathExistsForDate(summary, datefmt.format(date))) { - AbstractBackupPath path = pathProvider.get(); - path.parsePartialPrefix(summary); - temp.add(path); - } - } - return temp.iterator(); - } - - @Override 
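S3PartUploader above now retries through BoundedExponentialRetryCallable with a 200 ms floor and 5 attempts; that utility's body is not part of this diff, so the following is only a generic sketch of bounded exponential backoff, where the full jitter and the 10 s ceiling are assumptions:

    import java.util.concurrent.Callable;
    import java.util.concurrent.ThreadLocalRandom;

    public abstract class BoundedRetrySketch<T> implements Callable<T> {
        // The 200 ms floor and 5 attempts mirror S3PartUploader's constants.
        private final long minSleepMs = 200, maxSleepMs = 10_000;
        private final int maxAttempts = 5;

        protected abstract T retriableCall() throws Exception;

        @Override
        public T call() throws Exception {
            long ceiling = minSleepMs;
            for (int attempt = 1; ; attempt++) {
                try {
                    return retriableCall();
                } catch (Exception e) {
                    if (attempt >= maxAttempts) throw e;
                    // Sleep a random interval in [0, ceiling], then double the ceiling.
                    Thread.sleep(ThreadLocalRandom.current().nextLong(ceiling + 1));
                    ceiling = Math.min(ceiling * 2, maxSleepMs);
                }
            }
        }
    }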
- public boolean hasNext() { - if (iterator.hasNext()) { - return true; - } else { - while (objectListing.isTruncated() && !iterator.hasNext()) { - objectListing = s3Client.listNextBatchOfObjects(objectListing); - iterator = createIterator(); - } - } - return iterator.hasNext(); - } - - @Override - public AbstractBackupPath next() { - return iterator.next(); - } - - @Override - public void remove() { - } - - /** - * Get remote prefix upto the token - */ - private String remotePrefix(String location) { - StringBuffer buff = new StringBuffer(); - String[] elements = location.split(String.valueOf(S3BackupPath.PATH_SEP)); - if (elements.length <= 1) { - buff.append(config.getBackupLocation()).append(S3BackupPath.PATH_SEP); - buff.append(config.getDC()).append(S3BackupPath.PATH_SEP); - buff.append(config.getAppName()).append(S3BackupPath.PATH_SEP); - } else { - assert elements.length >= 4 : "Too few elements in path " + location; - buff.append(elements[1]).append(S3BackupPath.PATH_SEP); - buff.append(elements[2]).append(S3BackupPath.PATH_SEP); - buff.append(elements[3]).append(S3BackupPath.PATH_SEP); - } - return buff.toString(); - } - - /** - * Check to see if the path exists for the date - */ - private boolean pathExistsForDate(String tprefix, String datestr) { - ListObjectsRequest listReq = new ListObjectsRequest(); - // Get list of tokens - listReq.setBucketName(bucket); - listReq.setPrefix(tprefix + datestr); - ObjectListing listing; - listing = s3Client.listObjects(listReq); - return listing.getObjectSummaries().size() > 0; - } - -} diff --git a/priam/src/main/java/com/netflix/priam/aws/SDBInstanceData.java b/priam/src/main/java/com/netflix/priam/aws/SDBInstanceData.java index 128042435..1d0fe8a08 100644 --- a/priam/src/main/java/com/netflix/priam/aws/SDBInstanceData.java +++ b/priam/src/main/java/com/netflix/priam/aws/SDBInstanceData.java @@ -25,29 +25,36 @@ import com.netflix.priam.config.IConfiguration; import com.netflix.priam.cred.ICredential; import com.netflix.priam.identity.PriamInstance; - import java.util.*; -/** - * DAO for handling Instance identity information such as token, zone, region - */ +/** DAO for handling Instance identity information such as token, zone, region */ @Singleton public class SDBInstanceData { public static class Attributes { - public final static String APP_ID = "appId"; - public final static String ID = "id"; - public final static String INSTANCE_ID = "instanceId"; - public final static String TOKEN = "token"; - public final static String AVAILABILITY_ZONE = "availabilityZone"; - public final static String ELASTIC_IP = "elasticIP"; - public final static String UPDATE_TS = "updateTimestamp"; - public final static String LOCATION = "location"; - public final static String HOSTNAME = "hostname"; + public static final String APP_ID = "appId"; + public static final String ID = "id"; + public static final String INSTANCE_ID = "instanceId"; + public static final String TOKEN = "token"; + public static final String AVAILABILITY_ZONE = "availabilityZone"; + public static final String ELASTIC_IP = "elasticIP"; + public static final String UPDATE_TS = "updateTimestamp"; + public static final String LOCATION = "location"; + public static final String HOSTNAME = "hostname"; } public static final String DOMAIN = "InstanceIdentity"; - public static final String ALL_QUERY = "select * from " + DOMAIN + " where " + Attributes.APP_ID + "='%s'"; - public static final String INSTANCE_QUERY = "select * from " + DOMAIN + " where " + Attributes.APP_ID + "='%s' and " + 
Attributes.LOCATION + "='%s' and " + Attributes.ID + "='%d'"; + public static final String ALL_QUERY = + "select * from " + DOMAIN + " where " + Attributes.APP_ID + "='%s'"; + public static final String INSTANCE_QUERY = + "select * from " + + DOMAIN + + " where " + + Attributes.APP_ID + + "='%s' and " + + Attributes.LOCATION + + "='%s' and " + + Attributes.ID + + "='%d'"; private final ICredential provider; private final IConfiguration configuration; @@ -67,10 +74,11 @@ public SDBInstanceData(ICredential provider, IConfiguration configuration) { */ public PriamInstance getInstance(String app, String dc, int id) { AmazonSimpleDB simpleDBClient = getSimpleDBClient(); - SelectRequest request = new SelectRequest(String.format(INSTANCE_QUERY, app, dc, id)); + SelectRequest request = + new SelectRequest(String.format(INSTANCE_QUERY, app, dc, id)) + .withConsistentRead(true); SelectResult result = simpleDBClient.select(request); - if (result.getItems().size() == 0) - return null; + if (result.getItems().size() == 0) return null; return transform(result.getItems().get(0)); } @@ -82,16 +90,17 @@ public PriamInstance getInstance(String app, String dc, int id) { */ public Set getAllIds(String app) { AmazonSimpleDB simpleDBClient = getSimpleDBClient(); - Set inslist = new HashSet(); + Set inslist = new HashSet<>(); String nextToken = null; do { - SelectRequest request = new SelectRequest(String.format(ALL_QUERY, app)); - request.setNextToken(nextToken); + SelectRequest request = + new SelectRequest(String.format(ALL_QUERY, app)) + .withConsistentRead(true) + .withNextToken(nextToken); SelectResult result = simpleDBClient.select(request); nextToken = result.getNextToken(); - Iterator itemiter = result.getItems().iterator(); - while (itemiter.hasNext()) { - inslist.add(transform(itemiter.next())); + for (Item item : result.getItems()) { + inslist.add(transform(item)); } } while (nextToken != null); @@ -101,24 +110,36 @@ public Set getAllIds(String app) { /** * Create a new instance entry in SimpleDB * - * @param instance - * @throws AmazonServiceException + * @param orig Original instance used for validation + * @param inst Instance entry to be created. + * @throws AmazonServiceException If unable to write to Simple DB because of any error. */ - public void createInstance(PriamInstance instance) throws AmazonServiceException { - AmazonSimpleDB simpleDBClient = getSimpleDBClient(); - PutAttributesRequest putReq = new PutAttributesRequest(DOMAIN, getKey(instance), createAttributesToRegister(instance)); - simpleDBClient.putAttributes(putReq); + public void updateInstance(PriamInstance orig, PriamInstance inst) + throws AmazonServiceException { + PutAttributesRequest putReq = + new PutAttributesRequest(DOMAIN, getKey(inst), createAttributesToRegister(inst)) + .withExpected( + new UpdateCondition() + .withName(Attributes.INSTANCE_ID) + .withValue(orig.getInstanceId())) + .withExpected( + new UpdateCondition() + .withName(Attributes.TOKEN) + .withValue(orig.getToken())); + getSimpleDBClient().putAttributes(putReq); } /** * Register a new instance. Registration will fail if a prior entry exists * - * @param instance - * @throws AmazonServiceException + * @param instance Instance entry to be registered. + * @throws AmazonServiceException If unable to write to Simple DB because of any error. 
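The conditional-write pattern in SDBInstanceData (updateInstance above, registerInstance just below) leans on SimpleDB UpdateConditions for optimistic concurrency: registration requires that no instanceId attribute exists yet, while updates require the caller's view of instanceId and token to still match. A register-if-absent sketch, using the domain name from the diff but a hypothetical item and value:

    import com.amazonaws.services.simpledb.AmazonSimpleDB;
    import com.amazonaws.services.simpledb.model.PutAttributesRequest;
    import com.amazonaws.services.simpledb.model.ReplaceableAttribute;
    import com.amazonaws.services.simpledb.model.UpdateCondition;
    import java.util.Collections;

    public final class SdbRegisterSketch {
        // The put is rejected with a ConditionalCheckFailed error if another node
        // has already registered this item, which is exactly the race this guards.
        public static void registerOnce(AmazonSimpleDB sdb, String itemName, String instanceId) {
            PutAttributesRequest put =
                    new PutAttributesRequest(
                                    "InstanceIdentity",
                                    itemName,
                                    Collections.singletonList(
                                            new ReplaceableAttribute(
                                                    "instanceId", instanceId, false)))
                            .withExpected(
                                    new UpdateCondition().withName("instanceId").withExists(false));
            sdb.putAttributes(put);
        }
    }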
*/ public void registerInstance(PriamInstance instance) throws AmazonServiceException { AmazonSimpleDB simpleDBClient = getSimpleDBClient(); - PutAttributesRequest putReq = new PutAttributesRequest(DOMAIN, getKey(instance), createAttributesToRegister(instance)); + PutAttributesRequest putReq = + new PutAttributesRequest( + DOMAIN, getKey(instance), createAttributesToRegister(instance)); UpdateCondition expected = new UpdateCondition(); expected.setName(Attributes.INSTANCE_ID); expected.setExists(false); @@ -134,27 +155,33 @@ public void registerInstance(PriamInstance instance) throws AmazonServiceExcepti */ public void deregisterInstance(PriamInstance instance) throws AmazonServiceException { AmazonSimpleDB simpleDBClient = getSimpleDBClient(); - DeleteAttributesRequest delReq = new DeleteAttributesRequest(DOMAIN, getKey(instance), createAttributesToDeRegister(instance)); + DeleteAttributesRequest delReq = + new DeleteAttributesRequest( + DOMAIN, getKey(instance), createAttributesToDeRegister(instance)); simpleDBClient.deleteAttributes(delReq); } protected List createAttributesToRegister(PriamInstance instance) { instance.setUpdatetime(new Date().getTime()); - List attrs = new ArrayList(); - attrs.add(new ReplaceableAttribute(Attributes.INSTANCE_ID, instance.getInstanceId(), false)); + List attrs = new ArrayList<>(); + attrs.add( + new ReplaceableAttribute(Attributes.INSTANCE_ID, instance.getInstanceId(), false)); attrs.add(new ReplaceableAttribute(Attributes.TOKEN, instance.getToken(), true)); attrs.add(new ReplaceableAttribute(Attributes.APP_ID, instance.getApp(), true)); - attrs.add(new ReplaceableAttribute(Attributes.ID, Integer.toString(instance.getId()), true)); + attrs.add( + new ReplaceableAttribute(Attributes.ID, Integer.toString(instance.getId()), true)); attrs.add(new ReplaceableAttribute(Attributes.AVAILABILITY_ZONE, instance.getRac(), true)); attrs.add(new ReplaceableAttribute(Attributes.ELASTIC_IP, instance.getHostIP(), true)); attrs.add(new ReplaceableAttribute(Attributes.HOSTNAME, instance.getHostName(), true)); attrs.add(new ReplaceableAttribute(Attributes.LOCATION, instance.getDC(), true)); - attrs.add(new ReplaceableAttribute(Attributes.UPDATE_TS, Long.toString(instance.getUpdatetime()), true)); + attrs.add( + new ReplaceableAttribute( + Attributes.UPDATE_TS, Long.toString(instance.getUpdatetime()), true)); return attrs; } protected List createAttributesToDeRegister(PriamInstance instance) { - List attrs = new ArrayList(); + List attrs = new ArrayList<>(); attrs.add(new Attribute(Attributes.INSTANCE_ID, instance.getInstanceId())); attrs.add(new Attribute(Attributes.TOKEN, instance.getToken())); attrs.add(new Attribute(Attributes.APP_ID, instance.getApp())); @@ -175,25 +202,16 @@ protected List createAttributesToDeRegister(PriamInstance instance) { */ public PriamInstance transform(Item item) { PriamInstance ins = new PriamInstance(); - Iterator attrs = item.getAttributes().iterator(); - while (attrs.hasNext()) { - Attribute att = attrs.next(); - if (att.getName().equals(Attributes.INSTANCE_ID)) - ins.setInstanceId(att.getValue()); - else if (att.getName().equals(Attributes.TOKEN)) - ins.setToken(att.getValue()); - else if (att.getName().equals(Attributes.APP_ID)) - ins.setApp(att.getValue()); + for (Attribute att : item.getAttributes()) { + if (att.getName().equals(Attributes.INSTANCE_ID)) ins.setInstanceId(att.getValue()); + else if (att.getName().equals(Attributes.TOKEN)) ins.setToken(att.getValue()); + else if (att.getName().equals(Attributes.APP_ID)) 
ins.setApp(att.getValue()); else if (att.getName().equals(Attributes.ID)) ins.setId(Integer.parseInt(att.getValue())); - else if (att.getName().equals(Attributes.AVAILABILITY_ZONE)) - ins.setRac(att.getValue()); - else if (att.getName().equals(Attributes.ELASTIC_IP)) - ins.setHostIP(att.getValue()); - else if (att.getName().equals(Attributes.HOSTNAME)) - ins.setHost(att.getValue()); - else if (att.getName().equals(Attributes.LOCATION)) - ins.setDC(att.getValue()); + else if (att.getName().equals(Attributes.AVAILABILITY_ZONE)) ins.setRac(att.getValue()); + else if (att.getName().equals(Attributes.ELASTIC_IP)) ins.setHostIP(att.getValue()); + else if (att.getName().equals(Attributes.HOSTNAME)) ins.setHost(att.getValue()); + else if (att.getName().equals(Attributes.LOCATION)) ins.setDC(att.getValue()); else if (att.getName().equals(Attributes.UPDATE_TS)) ins.setUpdatetime(Long.parseLong(att.getValue())); } @@ -205,7 +223,10 @@ private String getKey(PriamInstance instance) { } private AmazonSimpleDB getSimpleDBClient() { - //Create per request - return AmazonSimpleDBClient.builder().withCredentials(provider.getAwsCredentialProvider()).withRegion(configuration.getSDBInstanceIdentityRegion()).build(); + // Create per request + return AmazonSimpleDBClient.builder() + .withCredentials(provider.getAwsCredentialProvider()) + .withRegion(configuration.getSDBInstanceIdentityRegion()) + .build(); } } diff --git a/priam/src/main/java/com/netflix/priam/aws/SDBInstanceFactory.java b/priam/src/main/java/com/netflix/priam/aws/SDBInstanceFactory.java index a10cd5878..df80c0bb5 100644 --- a/priam/src/main/java/com/netflix/priam/aws/SDBInstanceFactory.java +++ b/priam/src/main/java/com/netflix/priam/aws/SDBInstanceFactory.java @@ -17,41 +17,40 @@ package com.netflix.priam.aws; import com.amazonaws.AmazonServiceException; +import com.google.common.collect.ImmutableSet; import com.google.inject.Inject; import com.google.inject.Singleton; -import com.netflix.priam.config.IConfiguration; import com.netflix.priam.identity.IPriamInstanceFactory; import com.netflix.priam.identity.PriamInstance; +import com.netflix.priam.identity.config.InstanceInfo; +import java.util.*; +import java.util.stream.Collectors; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.*; - /** - * SimpleDB based instance factory. Requires 'InstanceIdentity' domain to be - * created ahead + * SimpleDB based instance instanceIdentity. 
Requires 'InstanceIdentity' domain to be created ahead */ @Singleton -public class SDBInstanceFactory implements IPriamInstanceFactory { +public class SDBInstanceFactory implements IPriamInstanceFactory { private static final Logger logger = LoggerFactory.getLogger(SDBInstanceFactory.class); - private final IConfiguration config; private final SDBInstanceData dao; + private final InstanceInfo instanceInfo; @Inject - public SDBInstanceFactory(IConfiguration config, SDBInstanceData dao) { - this.config = config; + public SDBInstanceFactory(SDBInstanceData dao, InstanceInfo instanceInfo) { this.dao = dao; + this.instanceInfo = instanceInfo; } @Override - public List getAllIds(String appName) { - List return_ = new ArrayList(); - for (PriamInstance instance : dao.getAllIds(appName)) { - return_.add(instance); - } - sort(return_); - return return_; + public ImmutableSet getAllIds(String appName) { + return ImmutableSet.copyOf( + dao.getAllIds(appName) + .stream() + .sorted((Comparator.comparingInt(PriamInstance::getId))) + .collect(Collectors.toList())); } @Override @@ -60,18 +59,29 @@ public PriamInstance getInstance(String appName, String dc, int id) { } @Override - public PriamInstance create(String app, int id, String instanceID, String hostname, String ip, String rac, Map volumes, String token) { + public PriamInstance create( + String app, + int id, + String instanceID, + String hostname, + String ip, + String rac, + Map volumes, + String token) { try { - PriamInstance ins = makePriamInstance(app, id, instanceID, hostname, ip, rac, volumes, token); + PriamInstance ins = + makePriamInstance(app, id, instanceID, hostname, ip, rac, volumes, token); // remove old data node which are dead. if (app.endsWith("-dead")) { try { - PriamInstance oldData = dao.getInstance(app, config.getDC(), id); + PriamInstance oldData = dao.getInstance(app, instanceInfo.getRegion(), id); // clean up a very old data... - if (null != oldData && oldData.getUpdatetime() < (System.currentTimeMillis() - (3 * 60 * 1000))) + if (null != oldData + && oldData.getUpdatetime() + < (System.currentTimeMillis() - (3 * 60 * 1000))) dao.deregisterInstance(oldData); } catch (Exception ex) { - //Do nothing + // Do nothing logger.error(ex.getMessage(), ex); } } @@ -93,36 +103,24 @@ public void delete(PriamInstance inst) { } @Override - public void update(PriamInstance inst) { + public void update(PriamInstance orig, PriamInstance inst) { try { - dao.createInstance(inst); + dao.updateInstance(orig, inst); } catch (AmazonServiceException e) { throw new RuntimeException("Unable to update/create priam instance", e); } } - @Override - public void sort(List return_) { - Comparator comparator = new Comparator() { - - @Override - public int compare(PriamInstance o1, PriamInstance o2) { - - Integer c1 = o1.getId(); - Integer c2 = o2.getId(); - return c1.compareTo(c2); - } - }; - Collections.sort(return_, comparator); - } - - @Override - public void attachVolumes(PriamInstance instance, String mountPath, String device) { - // TODO Auto-generated method stub - } - - private PriamInstance makePriamInstance(String app, int id, String instanceID, String hostname, String ip, String rac, Map volumes, String token) { - Map v = (volumes == null) ? new HashMap() : volumes; + private PriamInstance makePriamInstance( + String app, + int id, + String instanceID, + String hostname, + String ip, + String rac, + Map volumes, + String token) { + Map v = (volumes == null) ? 
new HashMap<>() : volumes; PriamInstance ins = new PriamInstance(); ins.setApp(app); ins.setRac(rac); @@ -130,7 +128,7 @@ private PriamInstance makePriamInstance(String app, int id, String instanceID, S ins.setHostIP(ip); ins.setId(id); ins.setInstanceId(instanceID); - ins.setDC(config.getDC()); + ins.setDC(instanceInfo.getRegion()); ins.setToken(token); ins.setVolumes(v); return ins; diff --git a/priam/src/main/java/com/netflix/priam/aws/UpdateCleanupPolicy.java b/priam/src/main/java/com/netflix/priam/aws/UpdateCleanupPolicy.java index 41034a5f2..56ca053ba 100644 --- a/priam/src/main/java/com/netflix/priam/aws/UpdateCleanupPolicy.java +++ b/priam/src/main/java/com/netflix/priam/aws/UpdateCleanupPolicy.java @@ -19,21 +19,18 @@ import com.google.inject.Inject; import com.google.inject.Singleton; import com.google.inject.name.Named; -import com.netflix.priam.config.IConfiguration; import com.netflix.priam.backup.IBackupFileSystem; +import com.netflix.priam.config.IConfiguration; import com.netflix.priam.scheduler.SimpleTimer; import com.netflix.priam.scheduler.Task; import com.netflix.priam.scheduler.TaskTimer; import com.netflix.priam.utils.RetryableCallable; -/** - * Updates the cleanup policy for the bucket - * - */ +/** Updates the cleanup policy for the bucket */ @Singleton public class UpdateCleanupPolicy extends Task { public static final String JOBNAME = "UpdateCleanupPolicy"; - private IBackupFileSystem fs; + private final IBackupFileSystem fs; @Inject public UpdateCleanupPolicy(IConfiguration config, @Named("backup") IBackupFileSystem fs) { @@ -61,5 +58,4 @@ public String getName() { public static TaskTimer getTimer() { return new SimpleTimer(JOBNAME); } - } diff --git a/priam/src/main/java/com/netflix/priam/aws/UpdateSecuritySettings.java b/priam/src/main/java/com/netflix/priam/aws/UpdateSecuritySettings.java deleted file mode 100644 index 21c407223..000000000 --- a/priam/src/main/java/com/netflix/priam/aws/UpdateSecuritySettings.java +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Copyright 2013 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -package com.netflix.priam.aws; - -import com.google.common.collect.Lists; -import com.google.inject.Inject; -import com.google.inject.Singleton; -import com.netflix.priam.config.IConfiguration; -import com.netflix.priam.identity.IMembership; -import com.netflix.priam.identity.IPriamInstanceFactory; -import com.netflix.priam.identity.InstanceIdentity; -import com.netflix.priam.identity.PriamInstance; -import com.netflix.priam.scheduler.SimpleTimer; -import com.netflix.priam.scheduler.Task; -import com.netflix.priam.scheduler.TaskTimer; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.HashSet; -import java.util.List; -import java.util.Random; -import java.util.Set; - -/** - * this class will associate an Public IP's with a new instance so they can talk - * across the regions. - * - * Requirement: 1) Nodes in the same region needs to be able to talk to each - * other. 
2) Nodes in other regions needs to be able to talk to t`he others in - * the other region. - * - * Assumption: 1) IPriamInstanceFactory will provide the membership... and will - * be visible across the regions 2) IMembership amazon or any other - * implementation which can tell if the instance is part of the group (ASG in - * amazons case). - * - */ -@Singleton -public class UpdateSecuritySettings extends Task { - private static final Logger logger = LoggerFactory.getLogger(UpdateSecuritySettings.class); - public static final String JOBNAME = "Update_SG"; - public static boolean firstTimeUpdated = false; - - private static final Random ran = new Random(); - private final IMembership membership; - private final IPriamInstanceFactory factory; - - @Inject - //Note: do not parameterized the generic type variable to an implementation as it confuses Guice in the binding. - public UpdateSecuritySettings(IConfiguration config, IMembership membership, IPriamInstanceFactory factory) { - super(config); - this.membership = membership; - this.factory = factory; - } - - /** - * Seeds nodes execute this at the specifed interval. - * Other nodes run only on startup. - * Seeds in cassandra are the first node in each Availablity Zone. - */ - @Override - public void execute() { - // if seed dont execute. - int port = config.getSSLStoragePort(); - List acls = membership.listACL(port, port); - List instances = factory.getAllIds(config.getAppName()); - - // iterate to add... - Set add = new HashSet(); - List allInstances = factory.getAllIds(config.getAppName()); - for (PriamInstance instance : allInstances) { - String range = instance.getHostIP() + "/32"; - if (!acls.contains(range)) - add.add(range); - } - if (add.size() > 0) { - membership.addACL(add, port, port); - firstTimeUpdated = true; - } - - // just iterate to generate ranges. - List currentRanges = Lists.newArrayList(); - for (PriamInstance instance : instances) { - String range = instance.getHostIP() + "/32"; - currentRanges.add(range); - } - - // iterate to remove... - List remove = Lists.newArrayList(); - for (String acl : acls) - if (!currentRanges.contains(acl)) // if not found then remove.... - remove.add(acl); - if (remove.size() > 0) { - membership.removeACL(remove, port, port); - firstTimeUpdated = true; - } - } - - public static TaskTimer getTimer(InstanceIdentity id) { - SimpleTimer return_; - if (id.isSeed()) { - logger.info("Seed node. Instance id: {}" - + ", host ip: {}" - + ", host name: {}", - id.getInstance().getInstanceId(), id.getInstance().getHostIP(), id.getInstance().getHostName()); - return_ = new SimpleTimer(JOBNAME, 120 * 1000 + ran.nextInt(120 * 1000)); - } else - return_ = new SimpleTimer(JOBNAME); - return return_; - } - - @Override - public String getName() { - return JOBNAME; - } -} diff --git a/priam/src/main/java/com/netflix/priam/aws/auth/EC2RoleAssumptionCredential.java b/priam/src/main/java/com/netflix/priam/aws/auth/EC2RoleAssumptionCredential.java index f19805256..c01c92442 100644 --- a/priam/src/main/java/com/netflix/priam/aws/auth/EC2RoleAssumptionCredential.java +++ b/priam/src/main/java/com/netflix/priam/aws/auth/EC2RoleAssumptionCredential.java @@ -1,16 +1,14 @@ /** * Copyright 2017 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and + * + *

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + *

http://www.apache.org/licenses/LICENSE-2.0 + * + *

Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.priam.aws.auth; @@ -20,20 +18,21 @@ import com.google.inject.Inject; import com.netflix.priam.config.IConfiguration; import com.netflix.priam.cred.ICredential; -import com.netflix.priam.identity.InstanceEnvIdentity; +import com.netflix.priam.identity.config.InstanceInfo; public class EC2RoleAssumptionCredential implements ICredential { private static final String AWS_ROLE_ASSUMPTION_SESSION_NAME = "AwsRoleAssumptionSession"; - private ICredential cred; - private IConfiguration config; - private InstanceEnvIdentity insEnvIdentity; + private final ICredential cred; + private final IConfiguration config; + private final InstanceInfo instanceInfo; private AWSCredentialsProvider stsSessionCredentialsProvider; @Inject - public EC2RoleAssumptionCredential(ICredential cred, IConfiguration config, InstanceEnvIdentity insEnvIdentity) { + public EC2RoleAssumptionCredential( + ICredential cred, IConfiguration config, InstanceInfo instanceInfo) { this.cred = cred; this.config = config; - this.insEnvIdentity = insEnvIdentity; + this.instanceInfo = instanceInfo; } @Override @@ -42,40 +41,48 @@ public AWSCredentialsProvider getAwsCredentialProvider() { synchronized (this) { if (this.stsSessionCredentialsProvider == null) { - String roleArn = null; + String roleArn; /** - * Create the assumed IAM role based on the environment. - * For example, if the current environment is VPC, - * then the assumed role is for EC2 classic, and vice versa. + * Create the assumed IAM role based on the environment. For example, if the + * current environment is VPC, then the assumed role is for EC2 classic, and + * vice versa. */ - if (this.insEnvIdentity.isClassic()) { - roleArn = this.config.getClassicEC2RoleAssumptionArn(); // Env is EC2 classic --> IAM assumed role for VPC created + if (instanceInfo.getInstanceEnvironment() + == InstanceInfo.InstanceEnvironment.CLASSIC) { + roleArn = this.config.getClassicEC2RoleAssumptionArn(); + // Env is EC2 classic --> IAM assumed role for VPC created } else { - roleArn = this.config.getVpcEC2RoleAssumptionArn(); // Env is VPC --> IAM assumed role for EC2 classic created + roleArn = this.config.getVpcEC2RoleAssumptionArn(); + // Env is VPC --> IAM assumed role for EC2 classic created. } // if (roleArn == null || roleArn.isEmpty()) - throw new NullPointerException("Role ARN is null or empty probably due to missing config entry"); - + throw new NullPointerException( + "Role ARN is null or empty probably due to missing config entry"); /** - * Get handle to an implementation that uses AWS Security Token Service (STS) to create temporary, - * short-lived session with explicit refresh for session/token expiration. + * Get handle to an implementation that uses AWS Security Token Service (STS) to + * create temporary, short-lived session with explicit refresh for session/token + * expiration. 
*/ try { - this.stsSessionCredentialsProvider = new STSAssumeRoleSessionCredentialsProvider(this.cred.getAwsCredentialProvider(), roleArn, AWS_ROLE_ASSUMPTION_SESSION_NAME); + this.stsSessionCredentialsProvider = + new STSAssumeRoleSessionCredentialsProvider( + this.cred.getAwsCredentialProvider(), + roleArn, + AWS_ROLE_ASSUMPTION_SESSION_NAME); } catch (Exception ex) { - throw new IllegalStateException("Exception in getting handle to AWS Security Token Service (STS). Msg: " + ex.getLocalizedMessage(), ex); + throw new IllegalStateException( + "Exception in getting handle to AWS Security Token Service (STS). Msg: " + + ex.getLocalizedMessage(), + ex); } - } - } } return this.stsSessionCredentialsProvider; - } -} \ No newline at end of file +} diff --git a/priam/src/main/java/com/netflix/priam/aws/auth/IS3Credential.java b/priam/src/main/java/com/netflix/priam/aws/auth/IS3Credential.java index 266cf1eb3..86ede4744 100755 --- a/priam/src/main/java/com/netflix/priam/aws/auth/IS3Credential.java +++ b/priam/src/main/java/com/netflix/priam/aws/auth/IS3Credential.java @@ -1,16 +1,14 @@ /** * Copyright 2017 Netflix, Inc. - *
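As context for the EC2RoleAssumptionCredential change just completed above: the class picks a role ARN based on the instance environment and hands it to STSAssumeRoleSessionCredentialsProvider, which mints short-lived session credentials and refreshes them before they expire. A minimal sketch of that pattern, using the same SDK v1 constructor the diff itself retains (the role ARN and session name here are placeholders, and later SDK releases deprecate this constructor in favor of a builder):

import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.auth.STSAssumeRoleSessionCredentialsProvider;

public class AssumeRoleSketch {
    public static AWSCredentialsProvider assumeRole(String roleArn) {
        // Base credentials (instance profile, env vars, etc.) are exchanged
        // for temporary session credentials scoped to the assumed role; the
        // provider refreshes the session transparently on expiry.
        AWSCredentialsProvider base = new DefaultAWSCredentialsProviderChain();
        return new STSAssumeRoleSessionCredentialsProvider(base, roleArn, "ExampleSession");
    }
}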

- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and + * + *

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + *

http://www.apache.org/licenses/LICENSE-2.0 + * + *

Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.priam.aws.auth; diff --git a/priam/src/main/java/com/netflix/priam/aws/auth/S3InstanceCredential.java b/priam/src/main/java/com/netflix/priam/aws/auth/S3InstanceCredential.java index 09f172ef1..03b4bf939 100755 --- a/priam/src/main/java/com/netflix/priam/aws/auth/S3InstanceCredential.java +++ b/priam/src/main/java/com/netflix/priam/aws/auth/S3InstanceCredential.java @@ -1,16 +1,14 @@ /** * Copyright 2017 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and + * + *

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + *

http://www.apache.org/licenses/LICENSE-2.0 + * + *

Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.priam.aws.auth; @@ -24,10 +22,10 @@ */ public class S3InstanceCredential implements IS3Credential { - private InstanceProfileCredentialsProvider credentialsProvider; + private final InstanceProfileCredentialsProvider credentialsProvider; public S3InstanceCredential() { - this.credentialsProvider = new InstanceProfileCredentialsProvider(); + this.credentialsProvider = InstanceProfileCredentialsProvider.getInstance(); } @Override @@ -39,6 +37,4 @@ public AWSCredentials getCredentials() throws Exception { public AWSCredentialsProvider getAwsCredentialProvider() { return this.credentialsProvider; } - - -} \ No newline at end of file +} diff --git a/priam/src/main/java/com/netflix/priam/aws/auth/S3RoleAssumptionCredential.java b/priam/src/main/java/com/netflix/priam/aws/auth/S3RoleAssumptionCredential.java index 1296f7ea7..21befc17b 100755 --- a/priam/src/main/java/com/netflix/priam/aws/auth/S3RoleAssumptionCredential.java +++ b/priam/src/main/java/com/netflix/priam/aws/auth/S3RoleAssumptionCredential.java @@ -1,16 +1,14 @@ /** * Copyright 2017 Netflix, Inc. - *
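A side note on the S3InstanceCredential change above: swapping new InstanceProfileCredentialsProvider() for InstanceProfileCredentialsProvider.getInstance() makes every caller share one cached copy of the instance-profile credentials rather than each instance polling the EC2 metadata service on its own; the direct constructor is deprecated in later SDK v1 releases. Usage is otherwise unchanged:

import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.InstanceProfileCredentialsProvider;

public class InstanceProfileSketch {
    public static AWSCredentialsProvider provider() {
        // Returns a process-wide singleton backed by the EC2 instance profile.
        return InstanceProfileCredentialsProvider.getInstance();
    }
}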

- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and + * + *

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + *

http://www.apache.org/licenses/LICENSE-2.0 + * + *

Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.priam.aws.auth; @@ -30,8 +28,8 @@ public class S3RoleAssumptionCredential implements IS3Credential { private static final String AWS_ROLE_ASSUMPTION_SESSION_NAME = "S3RoleAssumptionSession"; private static final Logger logger = LoggerFactory.getLogger(S3RoleAssumptionCredential.class); - private ICredential cred; - private IConfiguration config; + private final ICredential cred; + private final IConfiguration config; private AWSCredentialsProvider stsSessionCredentialsProvider; @Inject @@ -68,27 +66,37 @@ public AWSCredentialsProvider getAwsCredentialProvider() { synchronized (this) { if (this.stsSessionCredentialsProvider == null) { - final String roleArn = this.config.getAWSRoleAssumptionArn(); //IAM role created for bucket own by account "awsprodbackup" + final String roleArn = this.config.getAWSRoleAssumptionArn(); + // IAM role created for bucket own by account "awsprodbackup" if (roleArn == null || roleArn.isEmpty()) { - logger.warn("Role ARN is null or empty probably due to missing config entry. Falling back to instance level credentials"); + logger.warn( + "Role ARN is null or empty probably due to missing config entry. Falling back to instance level credentials"); this.stsSessionCredentialsProvider = this.cred.getAwsCredentialProvider(); - //throw new NullPointerException("Role ARN is null or empty probably due to missing config entry"); + // throw new NullPointerException("Role ARN is null or empty probably due to + // missing config entry"); } else { - //== Get handle to an implementation that uses AWS Security Token Service (STS) to create temporary, short-lived session with explicit refresh for session/token expiration. + // Get handle to an implementation that uses AWS Security Token Service + // (STS) to create temporary, short-lived session with explicit refresh for + // session/token expiration. try { - this.stsSessionCredentialsProvider = new STSAssumeRoleSessionCredentialsProvider(this.cred.getAwsCredentialProvider(), roleArn, AWS_ROLE_ASSUMPTION_SESSION_NAME); + this.stsSessionCredentialsProvider = + new STSAssumeRoleSessionCredentialsProvider( + this.cred.getAwsCredentialProvider(), + roleArn, + AWS_ROLE_ASSUMPTION_SESSION_NAME); } catch (Exception ex) { - throw new IllegalStateException("Exception in getting handle to AWS Security Token Service (STS). Msg: " + ex.getLocalizedMessage(), ex); + throw new IllegalStateException( + "Exception in getting handle to AWS Security Token Service (STS). 
Msg: " + + ex.getLocalizedMessage(), + ex); } } - } } } return this.stsSessionCredentialsProvider; } - -} \ No newline at end of file +} diff --git a/priam/src/main/java/com/netflix/priam/backup/AbstractBackup.java b/priam/src/main/java/com/netflix/priam/backup/AbstractBackup.java index 611aa6ab8..bd8cab2cc 100644 --- a/priam/src/main/java/com/netflix/priam/backup/AbstractBackup.java +++ b/priam/src/main/java/com/netflix/priam/backup/AbstractBackup.java @@ -16,181 +16,125 @@ */ package com.netflix.priam.backup; -import com.google.common.collect.Lists; import com.google.inject.Inject; -import com.google.inject.Provider; import com.netflix.priam.config.IConfiguration; -import com.netflix.priam.backup.AbstractBackupPath.BackupFileType; import com.netflix.priam.scheduler.Task; -import com.netflix.priam.utils.RetryableCallable; import com.netflix.priam.utils.SystemUtils; +import java.io.File; +import java.io.FileFilter; +import java.nio.file.DirectoryStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.HashSet; +import java.util.Optional; +import java.util.Set; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.File; -import java.util.List; - -/** - * Abstract Backup class for uploading files to backup location - */ -public abstract class AbstractBackup extends Task{ +/** Abstract Backup class for uploading files to backup location */ +public abstract class AbstractBackup extends Task { private static final Logger logger = LoggerFactory.getLogger(AbstractBackup.class); - public static final String INCREMENTAL_BACKUP_FOLDER = "backups"; + static final String INCREMENTAL_BACKUP_FOLDER = "backups"; public static final String SNAPSHOT_FOLDER = "snapshots"; - protected final Provider pathFactory; - - protected IBackupFileSystem fs; - @Inject - public AbstractBackup(IConfiguration config, IFileSystemContext backupFileSystemCtx, - Provider pathFactory) { + public AbstractBackup(IConfiguration config) { super(config); - this.pathFactory = pathFactory; - this.fs = backupFileSystemCtx.getFileStrategy(config); - } - - /** - * A means to override the type of backup strategy chosen via BackupFileSystemContext - */ - protected void setFileSystem(IBackupFileSystem fs) { - this.fs = fs; - } - - /** - * Upload files in the specified dir. Does not delete the file in case of - * error. The files are uploaded serially. - * - * @param parent Parent dir - * @param type Type of file (META, SST, SNAP etc) - * @return List of files that are successfully uploaded as part of backup - * @throws Exception when there is failure in uploading files. - */ - List upload(File parent, final BackupFileType type) throws Exception { - final List bps = Lists.newArrayList(); - for (final File file : parent.listFiles()) { - //== decorate file with metadata - final AbstractBackupPath bp = pathFactory.get(); - bp.parseLocal(file, type); - - try { - logger.info("About to upload file {} for backup", file.getCanonicalFile()); - - AbstractBackupPath abp = new RetryableCallable(3, RetryableCallable.DEFAULT_WAIT_TIME) { - public AbstractBackupPath retriableCall() throws Exception { - upload(bp); - file.delete(); - return bp; - } - }.call(); - - if (abp != null) - bps.add(abp); - - addToRemotePath(abp.getRemotePath()); - } catch (Exception e) { - //Throw exception to the caller. This will allow them to take appropriate decision. 
- logger.error("Failed to upload local file {} within CF {}.", file.getCanonicalFile(), parent.getAbsolutePath(), e); - throw e; - } - } - return bps; } - - /** - * Upload specified file (RandomAccessFile) with retries - * - * @param bp backup path to be uploaded. - */ - protected void upload(final AbstractBackupPath bp) throws Exception { - new RetryableCallable() { - @Override - public Void retriableCall() throws Exception { - java.io.InputStream is = null; - try { - is = bp.localReader(); - if (is == null) { - throw new NullPointerException("Unable to get handle on file: " + bp.fileName); - } - fs.upload(bp, is); - bp.setCompressedFileSize(fs.getBytesUploaded()); - return null; - } catch (Exception e) { - logger.error("Exception uploading local file {}, releasing handle, and will retry.", bp.backupFile.getCanonicalFile()); - if (is != null) { - is.close(); - } - throw e; - } - - } - }.call(); - } - - protected final void initiateBackup(String monitoringFolder, BackupRestoreUtil backupRestoreUtil) throws Exception { + protected final void initiateBackup( + String monitoringFolder, BackupRestoreUtil backupRestoreUtil) throws Exception { File dataDir = new File(config.getDataFileLocation()); - if (!dataDir.exists()) { - throw new IllegalArgumentException("The configured 'data file location' does not exist: " - + config.getDataFileLocation()); + if (!dataDir.exists() || !dataDir.isDirectory()) { + throw new IllegalArgumentException( + "The configured 'data file location' does not exist or is not a directory: " + + config.getDataFileLocation()); } logger.debug("Scanning for backup in: {}", dataDir.getAbsolutePath()); - for (File keyspaceDir : dataDir.listFiles()) { - if (keyspaceDir.isFile()) - continue; + File[] keyspaceDirectories = dataDir.listFiles(); + if (keyspaceDirectories == null) return; + + for (File keyspaceDir : keyspaceDirectories) { + if (keyspaceDir.isFile()) continue; logger.debug("Entering {} keyspace..", keyspaceDir.getName()); + File[] columnFamilyDirectories = keyspaceDir.listFiles(); + if (columnFamilyDirectories == null) continue; - for (File columnFamilyDir : keyspaceDir.listFiles()) { + for (File columnFamilyDir : columnFamilyDirectories) { File backupDir = new File(columnFamilyDir, monitoringFolder); - - if (!isValidBackupDir(keyspaceDir, backupDir)) { - continue; - } - - String columnFamilyName = columnFamilyDir.getName().split("-")[0]; - if (backupRestoreUtil.isFiltered(keyspaceDir.getName(), columnFamilyDir.getName())) { - //Clean the backup/snapshot directory else files will keep getting accumulated. - SystemUtils.cleanupDir(backupDir.getAbsolutePath(), null); - continue; + if (isAReadableDirectory(backupDir)) { + String columnFamilyName = getColumnFamily(backupDir); + if (backupRestoreUtil.isFiltered(keyspaceDir.getName(), columnFamilyName)) { + // Clean the backup/snapshot directory else files will keep getting + // accumulated. 
+ SystemUtils.cleanupDir(backupDir.getAbsolutePath(), null); + } else { + processColumnFamily(backupDir); + } } + } // end processing all CFs for keyspace + } // end processing keyspaces under the C* data dir + } - processColumnFamily(keyspaceDir.getName(), columnFamilyName, backupDir); - - } //end processing all CFs for keyspace - } //end processing keyspaces under the C* data dir + protected String getColumnFamily(File backupDir) { + return backupDir.getParentFile().getName().split("-")[0]; + } + protected String getKeyspace(File backupDir) { + return backupDir.getParentFile().getParentFile().getName(); } /** * Process the columnfamily in a given snapshot/backup directory. * - * @param keyspace Name of the keyspace - * @param columnFamily Name of the columnfamily - * @param backupDir Location of the backup/snapshot directory in that columnfamily. + * @param backupDir Location of the backup/snapshot directory in that columnfamily. * @throws Exception throws exception if there is any error in process the directory. */ - protected abstract void processColumnFamily(String keyspace, String columnFamily, File backupDir) throws Exception; - + protected abstract void processColumnFamily(File backupDir) throws Exception; /** - * Filters unwanted keyspaces + * Get all the backup directories for Cassandra. + * + * @param config to get the location of the data folder. + * @param monitoringFolder folder where cassandra backup's are configured. + * @return Set of the path(s) containing the backup folder for each columnfamily. + * @throws Exception incase of IOException. */ - private boolean isValidBackupDir(File keyspaceDir, File backupDir) { - if (!backupDir.isDirectory() && !backupDir.exists()) - return false; - String keyspaceName = keyspaceDir.getName(); - if (BackupRestoreUtil.FILTER_KEYSPACE.contains(keyspaceName)) { - logger.debug("{} is not consider a valid keyspace backup directory, will be bypass.", keyspaceName); - return false; - } + public static Set getBackupDirectories(IConfiguration config, String monitoringFolder) + throws Exception { + HashSet backupPaths = new HashSet<>(); + if (config.getDataFileLocation() == null) return backupPaths; + Path dataPath = Paths.get(config.getDataFileLocation()); + if (Files.exists(dataPath) && Files.isDirectory(dataPath)) + try (DirectoryStream directoryStream = + Files.newDirectoryStream(dataPath, path -> Files.isDirectory(path))) { + for (Path keyspaceDirPath : directoryStream) { + try (DirectoryStream keyspaceStream = + Files.newDirectoryStream( + keyspaceDirPath, path -> Files.isDirectory(path))) { + for (Path columnfamilyDirPath : keyspaceStream) { + Path backupDirPath = + Paths.get(columnfamilyDirPath.toString(), monitoringFolder); + if (Files.exists(backupDirPath) && Files.isDirectory(backupDirPath)) { + logger.debug("Backup folder: {}", backupDirPath); + backupPaths.add(backupDirPath); + } + } + } + } + } + return backupPaths; + } - return true; + protected static File[] getSecondaryIndexDirectories(File backupDir) { + FileFilter filter = (file) -> file.getName().startsWith(".") && isAReadableDirectory(file); + return Optional.ofNullable(backupDir.listFiles(filter)).orElse(new File[] {}); } - /** - * Adds Remote path to the list of Remote Paths - */ - protected abstract void addToRemotePath(String remotePath); + protected static boolean isAReadableDirectory(File dir) { + return dir.exists() && dir.isDirectory() && dir.canRead(); + } } diff --git a/priam/src/main/java/com/netflix/priam/backup/AbstractBackupPath.java 
b/priam/src/main/java/com/netflix/priam/backup/AbstractBackupPath.java index 28b049e51..e5c9a69fa 100644 --- a/priam/src/main/java/com/netflix/priam/backup/AbstractBackupPath.java +++ b/priam/src/main/java/com/netflix/priam/backup/AbstractBackupPath.java @@ -16,40 +16,66 @@ */ package com.netflix.priam.backup; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Joiner; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; import com.google.inject.ImplementedBy; +import com.netflix.priam.aws.RemoteBackupPath; +import com.netflix.priam.compress.CompressionType; +import com.netflix.priam.config.BackupsToCompress; import com.netflix.priam.config.IConfiguration; -import com.netflix.priam.aws.S3BackupPath; +import com.netflix.priam.cryptography.CryptographyAlgorithm; import com.netflix.priam.identity.InstanceIdentity; -import org.apache.commons.io.IOUtils; -import org.apache.commons.lang3.StringUtils; -import org.joda.time.DateTime; -import org.joda.time.format.DateTimeFormat; -import org.joda.time.format.DateTimeFormatter; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - +import com.netflix.priam.utils.DateUtil; import java.io.File; import java.io.IOException; -import java.io.InputStream; -import java.io.RandomAccessFile; -import java.text.ParseException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.attribute.BasicFileAttributes; +import java.time.Instant; import java.util.Date; +import java.util.Optional; +import org.apache.commons.lang3.StringUtils; -@ImplementedBy(S3BackupPath.class) +@ImplementedBy(RemoteBackupPath.class) public abstract class AbstractBackupPath implements Comparable { - private static final Logger logger = LoggerFactory.getLogger(AbstractBackupPath.class); - private static final String FMT = "yyyyMMddHHmm"; - private static final DateTimeFormatter DATE_FORMAT = DateTimeFormat.forPattern(FMT); public static final char PATH_SEP = File.separatorChar; + public static final Joiner PATH_JOINER = Joiner.on(PATH_SEP); + private static final ImmutableMap FOLDER_POSITIONS = + ImmutableMap.of(BackupFolder.BACKUPS, 3, BackupFolder.SNAPSHOTS, 4); public enum BackupFileType { - SNAP, SST, CL, META, META_V2; + CL, + META, + META_V2, + SECONDARY_INDEX_V2, + SNAP, + SNAPSHOT_VERIFIED, + SST, + SST_V2; + + private static ImmutableSet DATA_FILE_TYPES = + ImmutableSet.of(SECONDARY_INDEX_V2, SNAP, SST, SST_V2); + + private static ImmutableSet V2_FILE_TYPES = + ImmutableSet.of(SECONDARY_INDEX_V2, SST_V2, META_V2); + + public static boolean isDataFile(BackupFileType type) { + return DATA_FILE_TYPES.contains(type); + } - public static boolean isDataFile(BackupFileType type){ - if (type != BackupFileType.META && type != BackupFileType.META_V2 && type != BackupFileType.CL) - return true; + public static boolean isV2(BackupFileType type) { + return V2_FILE_TYPES.contains(type); + } - return false; + public static BackupFileType fromString(String s) throws BackupRestoreException { + try { + return BackupFileType.valueOf(s); + } catch (IllegalArgumentException e) { + throw new BackupRestoreException(String.format("Unknown BackupFileType %s", s)); + } } } @@ -61,100 +87,111 @@ public static boolean isDataFile(BackupFileType type){ protected String baseDir; protected String token; protected String region; + protected String indexDir; protected Date time; - protected long size; //uncompressed file size - protected long compressedFileSize 
= 0; - protected boolean isCassandra1_0; - - protected final InstanceIdentity factory; + private long size; // uncompressed file size + private long compressedFileSize = 0; + protected final InstanceIdentity instanceIdentity; protected final IConfiguration config; protected File backupFile; - protected Date uploadedTs; - - public AbstractBackupPath(IConfiguration config, InstanceIdentity factory) { - this.factory = factory; + private Instant lastModified; + private Instant creationTime; + private Date uploadedTs; + private CompressionType compression; + private CryptographyAlgorithm encryption = CryptographyAlgorithm.PLAINTEXT; + private boolean isIncremental; + + public AbstractBackupPath(IConfiguration config, InstanceIdentity instanceIdentity) { + this.instanceIdentity = instanceIdentity; this.config = config; + this.compression = + config.getBackupsToCompress() == BackupsToCompress.NONE + ? CompressionType.NONE + : CompressionType.SNAPPY; } - public static String formatDate(Date d) { - return new DateTime(d).toString(FMT); - } - - public Date parseDate(String s) { - return DATE_FORMAT.parseDateTime(s).toDate(); - } - - public InputStream localReader() throws IOException { - assert backupFile != null; - return new RafInputStream(new RandomAccessFile(backupFile, "r")); - } - - public void parseLocal(File file, BackupFileType type) throws ParseException { - // TODO cleanup. + public void parseLocal(File file, BackupFileType type) { this.backupFile = file; - - String rpath = new File(config.getDataFileLocation()).toURI().relativize(file.toURI()).getPath(); - String[] elements = rpath.split("" + PATH_SEP); - this.clusterName = config.getAppName(); this.baseDir = config.getBackupLocation(); - this.region = config.getDC(); - this.token = factory.getInstance().getToken(); + this.clusterName = config.getAppName(); + this.fileName = file.getName(); + BasicFileAttributes fileAttributes; + try { + fileAttributes = Files.readAttributes(file.toPath(), BasicFileAttributes.class); + this.lastModified = fileAttributes.lastModifiedTime().toInstant(); + this.creationTime = fileAttributes.creationTime().toInstant(); + this.size = fileAttributes.size(); + } catch (IOException e) { + this.lastModified = Instant.ofEpochMilli(0L); + this.creationTime = Instant.ofEpochMilli(0L); + this.size = 0L; + } + this.region = instanceIdentity.getInstanceInfo().getRegion(); + this.token = instanceIdentity.getInstance().getToken(); this.type = type; + + String rpath = + new File(config.getDataFileLocation()).toURI().relativize(file.toURI()).getPath(); + String[] parts = rpath.split("" + PATH_SEP); if (BackupFileType.isDataFile(type)) { - this.keyspace = elements[0]; - if (!isCassandra1_0) - this.columnFamily = elements[1]; + this.keyspace = parts[0]; + this.columnFamily = parts[1]; + } + if (BackupFileType.isDataFile(type)) { + Optional folder = BackupFolder.fromName(parts[2]); + this.isIncremental = folder.filter(BackupFolder.BACKUPS::equals).isPresent(); + if (type == BackupFileType.SECONDARY_INDEX_V2) { + Integer index = folder.map(FOLDER_POSITIONS::get).orElse(null); + Preconditions.checkNotNull(index, "Unrecognized backup folder " + parts[2]); + this.indexDir = parts[index]; + } } - if (type == BackupFileType.SNAP) - time = parseDate(elements[3]); - if (type == BackupFileType.SST || type == BackupFileType.CL) - time = new Date(file.lastModified()); - this.fileName = file.getName(); - this.size = file.length(); - } - /** - * Given a date range, find a common string prefix Eg: 20120212, 20120213 = - * 2012021 - */ - 
public String match(Date start, Date end) { - String sString = formatDate(start); - String eString = formatDate(end); + /* + 1. For old style snapshots, make this value to time at which backup was executed. + 2. This is to ensure that all the files from the snapshot are uploaded under single directory in remote file system. + 3. For META files we always override the time field via @link{Metadata#decorateMetaJson} + */ + this.time = + type == BackupFileType.SNAP + ? DateUtil.getDate(parts[3]) + : new Date(lastModified.toEpochMilli()); + } + + /** Given a date range, find a common string prefix Eg: 20120212, 20120213 = 2012021 */ + protected String match(Date start, Date end) { + String sString = DateUtil.formatyyyyMMddHHmm(start); // formatDate(start); + String eString = DateUtil.formatyyyyMMddHHmm(end); // formatDate(end); int diff = StringUtils.indexOfDifference(sString, eString); - if (diff < 0) - return sString; + if (diff < 0) return sString; return sString.substring(0, diff); } - /** - * Local restore file - */ + /** Local restore file */ public File newRestoreFile() { - StringBuffer buff = new StringBuffer(); - if (type == BackupFileType.CL) { - buff.append(config.getBackupCommitLogLocation()).append(PATH_SEP); - } else { - - buff.append(config.getDataFileLocation()).append(PATH_SEP); - if (type != BackupFileType.META && type != BackupFileType.META_V2) { - if (isCassandra1_0) - buff.append(keyspace).append(PATH_SEP); - else - buff.append(keyspace).append(PATH_SEP).append(columnFamily).append(PATH_SEP); - } + File return_; + String dataDir = config.getDataFileLocation(); + switch (type) { + case CL: + return_ = new File(PATH_JOINER.join(config.getBackupCommitLogLocation(), fileName)); + break; + case SECONDARY_INDEX_V2: + String restoreFileName = + PATH_JOINER.join(dataDir, keyspace, columnFamily, indexDir, fileName); + return_ = new File(restoreFileName); + break; + case META: + case META_V2: + return_ = new File(PATH_JOINER.join(config.getDataFileLocation(), fileName)); + break; + default: + return_ = new File(PATH_JOINER.join(dataDir, keyspace, columnFamily, fileName)); } - - buff.append(fileName); - - File return_ = new File(buff.toString()); File parent = new File(return_.getParent()); - if (!parent.exists()) - parent.mkdirs(); + if (!parent.exists()) parent.mkdirs(); return return_; } - - @Override public int compareTo(AbstractBackupPath o) { return getRemotePath().compareTo(o.getRemotePath()); @@ -162,35 +199,27 @@ public int compareTo(AbstractBackupPath o) { @Override public boolean equals(Object obj) { - if (!obj.getClass().equals(this.getClass())) - return false; - return getRemotePath().equals(((AbstractBackupPath) obj).getRemotePath()); + return obj.getClass().equals(this.getClass()) + && getRemotePath().equals(((AbstractBackupPath) obj).getRemotePath()); } - /** - * Get remote prefix for this path object - */ + /** Get remote prefix for this path object */ public abstract String getRemotePath(); - /** - * Parses a fully constructed remote path - */ + /** Parses a fully constructed remote path */ public abstract void parseRemote(String remoteFilePath); - /** - * Parses paths with just token prefixes - */ + /** Parses paths with just token prefixes */ public abstract void parsePartialPrefix(String remoteFilePath); /** - * Provides a common prefix that matches all objects that fall between - * the start and end time + * Provides a common prefix that matches all objects that fall between the start and end time */ public abstract String remotePrefix(Date start, Date end, 
String location); - /** - * Provides the cluster prefix - */ + public abstract Path remoteV2Prefix(Path location, BackupFileType fileType); + + /** Provides the cluster prefix */ public abstract String clusterPrefix(String location); public BackupFileType getType() { @@ -217,10 +246,6 @@ public String getFileName() { return fileName; } - public String getBaseDir() { - return baseDir; - } - public String getToken() { return token; } @@ -233,6 +258,10 @@ public Date getTime() { return time; } + public void setTime(Date time) { + this.time = time; + } + /* @return original, uncompressed file size */ @@ -256,20 +285,12 @@ public File getBackupFile() { return backupFile; } - public boolean isCassandra1_0() { - return isCassandra1_0; - } - - public void setCassandra1_0(boolean isCassandra1_0) { - this.isCassandra1_0 = isCassandra1_0; - } - public void setFileName(String fileName) { this.fileName = fileName; } public InstanceIdentity getInstanceIdentity() { - return this.factory; + return this.instanceIdentity; } public void setUploadedTs(Date uploadedTs) { @@ -280,27 +301,41 @@ public Date getUploadedTs() { return this.uploadedTs; } - public static class RafInputStream extends InputStream { - private RandomAccessFile raf; + public Instant getLastModified() { + return lastModified; + } - public RafInputStream(RandomAccessFile raf) { - this.raf = raf; - } + public void setLastModified(Instant instant) { + this.lastModified = instant; + } - @Override - public synchronized int read(byte[] bytes, int off, int len) throws IOException { - return raf.read(bytes, off, len); - } + public Instant getCreationTime() { + return creationTime; + } - @Override - public void close() { - IOUtils.closeQuietly(raf); - } + @VisibleForTesting + public void setCreationTime(Instant instant) { + this.creationTime = instant; + } - @Override - public int read() throws IOException { - return 0; - } + public CompressionType getCompression() { + return compression; + } + + public void setCompression(CompressionType compressionType) { + this.compression = compressionType; + } + + public CryptographyAlgorithm getEncryption() { + return encryption; + } + + public void setEncryption(String encryption) { + this.encryption = CryptographyAlgorithm.valueOf(encryption); + } + + public boolean isIncremental() { + return isIncremental; } @Override diff --git a/priam/src/main/java/com/netflix/priam/backup/AbstractFileSystem.java b/priam/src/main/java/com/netflix/priam/backup/AbstractFileSystem.java new file mode 100644 index 000000000..a39190ab5 --- /dev/null +++ b/priam/src/main/java/com/netflix/priam/backup/AbstractFileSystem.java @@ -0,0 +1,386 @@ +/* + * Copyright 2018 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.priam.backup; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.common.cache.Cache; +import com.google.common.cache.CacheBuilder; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.ListeningExecutorService; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.inject.Inject; +import com.google.inject.Provider; +import com.netflix.priam.backup.AbstractBackupPath.BackupFileType; +import com.netflix.priam.config.IConfiguration; +import com.netflix.priam.merics.BackupMetrics; +import com.netflix.priam.notification.BackupEvent; +import com.netflix.priam.notification.BackupNotificationMgr; +import com.netflix.priam.notification.EventGenerator; +import com.netflix.priam.notification.EventObserver; +import com.netflix.priam.scheduler.BlockingSubmitThreadPoolExecutor; +import com.netflix.priam.utils.BoundedExponentialRetryCallable; +import com.netflix.spectator.api.patterns.PolledMeter; +import java.io.File; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.time.Instant; +import java.util.Date; +import java.util.Iterator; +import java.util.List; +import java.util.Set; +import java.util.concurrent.*; +import org.apache.commons.collections4.iterators.FilterIterator; +import org.apache.commons.collections4.iterators.TransformIterator; +import org.apache.commons.io.FileUtils; +import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * This class is responsible for managing parallelism and orchestrating the upload and download, but + * the subclasses actually implement the details of uploading a file. + * + *

Created by aagrawal on 8/30/18. + */ +public abstract class AbstractFileSystem implements IBackupFileSystem, EventGenerator { + private static final Logger logger = LoggerFactory.getLogger(AbstractFileSystem.class); + protected final Provider pathProvider; + private final CopyOnWriteArrayList> observers = + new CopyOnWriteArrayList<>(); + private final IConfiguration configuration; + protected final BackupMetrics backupMetrics; + private final Set tasksQueued; + private final ListeningExecutorService fileUploadExecutor; + private final ThreadPoolExecutor fileDownloadExecutor; + + // This is going to be a write-thru cache containing the most frequently used items from remote + // file system. This is to ensure that we don't make too many API calls to remote file system. + private final Cache objectCache; + + @Inject + public AbstractFileSystem( + IConfiguration configuration, + BackupMetrics backupMetrics, + BackupNotificationMgr backupNotificationMgr, + Provider pathProvider) { + this.configuration = configuration; + this.backupMetrics = backupMetrics; + this.pathProvider = pathProvider; + // Add notifications. + this.addObserver(backupNotificationMgr); + this.objectCache = + CacheBuilder.newBuilder().maximumSize(configuration.getBackupQueueSize()).build(); + tasksQueued = new ConcurrentHashMap<>().newKeySet(); + /* + Note: We are using different queue for upload and download as with Backup V2.0 we might download all the meta + files for "sync" feature which might compete with backups for scheduling. + Also, we may want to have different TIMEOUT for each kind of operation (upload/download) based on our file system choices. + */ + BlockingQueue uploadQueue = + new ArrayBlockingQueue<>(configuration.getBackupQueueSize()); + PolledMeter.using(backupMetrics.getRegistry()) + .withName(backupMetrics.uploadQueueSize) + .monitorSize(uploadQueue); + this.fileUploadExecutor = + MoreExecutors.listeningDecorator( + new BlockingSubmitThreadPoolExecutor( + configuration.getBackupThreads(), + uploadQueue, + configuration.getUploadTimeout())); + + BlockingQueue downloadQueue = + new ArrayBlockingQueue<>(configuration.getDownloadQueueSize()); + PolledMeter.using(backupMetrics.getRegistry()) + .withName(backupMetrics.downloadQueueSize) + .monitorSize(downloadQueue); + this.fileDownloadExecutor = + new BlockingSubmitThreadPoolExecutor( + configuration.getRestoreThreads(), + downloadQueue, + configuration.getDownloadTimeout()); + } + + @Override + public Future asyncDownloadFile(final AbstractBackupPath path, final int retry) + throws RejectedExecutionException { + return fileDownloadExecutor.submit( + () -> { + downloadFile(path, "" /* suffix */, retry); + return Paths.get(path.getRemotePath()); + }); + } + + @Override + public void downloadFile(final AbstractBackupPath path, String suffix, final int retry) + throws BackupRestoreException { + // TODO: Should we download the file if localPath already exists? + String remotePath = path.getRemotePath(); + String localPath = path.newRestoreFile().getAbsolutePath() + suffix; + logger.info("Downloading file: {} to location: {}", path.getRemotePath(), localPath); + try { + new BoundedExponentialRetryCallable(500, 10000, retry) { + @Override + public Void retriableCall() throws Exception { + downloadFileImpl(path, suffix); + return null; + } + }.call(); + // Note we only downloaded the bytes which are represented on file system (they are + // compressed and maybe encrypted). + // File size after decompression or decryption might be more/less. 
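The download path above wraps downloadFileImpl in Priam's BoundedExponentialRetryCallable(500, 10000, retry). As a generic illustration of what a bounded exponential-backoff wrapper of that shape typically does (a sketch under assumed semantics of min sleep, max sleep, and retry count, not Priam's actual implementation):

import java.util.concurrent.Callable;

/** Sketch of a bounded exponential-backoff retry wrapper; semantics assumed. */
abstract class BoundedRetrySketch<T> implements Callable<T> {
    private final long minSleepMs;
    private final long maxSleepMs;
    private final int maxRetries;

    BoundedRetrySketch(long minSleepMs, long maxSleepMs, int maxRetries) {
        this.minSleepMs = minSleepMs;
        this.maxSleepMs = maxSleepMs;
        this.maxRetries = maxRetries;
    }

    protected abstract T retriableCall() throws Exception;

    @Override
    public T call() throws Exception {
        long sleepMs = minSleepMs;
        for (int attempt = 1; ; attempt++) {
            try {
                return retriableCall();
            } catch (Exception e) {
                if (attempt >= maxRetries) throw e; // out of attempts, propagate
                Thread.sleep(sleepMs); // back off before the next attempt
                sleepMs = Math.min(sleepMs * 2, maxSleepMs); // double, up to the cap
            }
        }
    }
}

With (500, 10000, retry) such a wrapper would sleep 500 ms, then 1 s, then 2 s, and so on, never waiting more than 10 s between attempts.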
+ backupMetrics.recordDownloadRate(getFileSize(remotePath)); + backupMetrics.incrementValidDownloads(); + logger.info("Successfully downloaded file: {} to location: {}", remotePath, localPath); + } catch (Exception e) { + backupMetrics.incrementInvalidDownloads(); + logger.error("Error while downloading file: {} to location: {}", remotePath, localPath); + throw new BackupRestoreException(e.getMessage()); + } + } + + protected abstract void downloadFileImpl(final AbstractBackupPath path, String suffix) + throws BackupRestoreException; + + @Override + public ListenableFuture uploadAndDelete( + final AbstractBackupPath path, Instant target, boolean async) + throws RejectedExecutionException, BackupRestoreException { + if (async) { + return fileUploadExecutor.submit( + () -> uploadAndDeleteInternal(path, target, 10 /* retries */)); + } else { + return Futures.immediateFuture(uploadAndDeleteInternal(path, target, 10 /* retries */)); + } + } + + @VisibleForTesting + public AbstractBackupPath uploadAndDeleteInternal( + final AbstractBackupPath path, Instant target, int retry) + throws RejectedExecutionException, BackupRestoreException { + Path localPath = Paths.get(path.getBackupFile().getAbsolutePath()); + File localFile = localPath.toFile(); + Preconditions.checkArgument( + localFile.exists(), String.format("Can't upload nonexistent %s", localPath)); + Preconditions.checkArgument( + !localFile.isDirectory(), + String.format("Can only upload files %s is a directory", localPath)); + Path remotePath = Paths.get(path.getRemotePath()); + + if (tasksQueued.add(localPath)) { + logger.info("Uploading file: {} to location: {}", localPath, remotePath); + try { + long uploadedFileSize; + + // Upload file if it not present at remote location. + if (path.getType() != BackupFileType.SST_V2 || !checkObjectExists(remotePath)) { + notifyEventStart(new BackupEvent(path)); + uploadedFileSize = + new BoundedExponentialRetryCallable( + 500 /* minSleep */, 10000 /* maxSleep */, retry) { + @Override + public Long retriableCall() throws Exception { + return uploadFileImpl(path, target); + } + }.call(); + + // Add to cache after successful upload. + // We only add SST_V2 as other file types are usually not checked, so no point + // evicting our SST_V2 results. + if (path.getType() == BackupFileType.SST_V2) addObjectCache(remotePath); + + backupMetrics.recordUploadRate(uploadedFileSize); + backupMetrics.incrementValidUploads(); + path.setCompressedFileSize(uploadedFileSize); + notifyEventSuccess(new BackupEvent(path)); + } else { + // file is already uploaded to remote file system. + logger.info("File: {} already present on remoteFileSystem.", remotePath); + } + + logger.info( + "Successfully uploaded file: {} to location: {}", localPath, remotePath); + + if (!FileUtils.deleteQuietly(localFile)) + logger.warn( + String.format( + "Failed to delete local file %s.", + localFile.getAbsolutePath())); + + } catch (Exception e) { + backupMetrics.incrementInvalidUploads(); + notifyEventFailure(new BackupEvent(path)); + logger.error( + "Error while uploading file: {} to location: {}. Exception: Msg: [{}], Trace: {}", + localPath, + remotePath, + e.getMessage(), + e.getStackTrace()); + throw new BackupRestoreException(e.getMessage()); + } finally { + // Remove the task from the list so if we try to upload file ever again, we can. + tasksQueued.remove(localPath); + } + } else logger.info("Already in queue, no-op. 
File: {}", localPath); + return path; + } + + private void addObjectCache(Path remotePath) { + objectCache.put(remotePath, Boolean.TRUE); + } + + @Override + public boolean checkObjectExists(Path remotePath) { + // Check in cache, if remote file exists. + Boolean cacheResult = objectCache.getIfPresent(remotePath); + + // Cache hit. Return the value. + if (cacheResult != null) return cacheResult; + + // Cache miss - Check remote file system if object exist. + boolean remoteFileExist = doesRemoteFileExist(remotePath); + + if (remoteFileExist) addObjectCache(remotePath); + + return remoteFileExist; + } + + @Override + public void deleteRemoteFiles(List remotePaths) throws BackupRestoreException { + if (remotePaths == null) return; + + // Note that we are trying to implement write-thru cache here so it is good idea to + // invalidate the cache first. This is important so that if there is any issue (because file + // was deleted), it is caught by our snapshot job we can re-upload the file. This will also + // help in ensuring that our validation job fails if there are any error caused due to TTL + // of a file. + objectCache.invalidateAll(remotePaths); + deleteFiles(remotePaths); + } + + protected abstract void deleteFiles(List remotePaths) throws BackupRestoreException; + + protected abstract boolean doesRemoteFileExist(Path remotePath); + + protected abstract long uploadFileImpl(final AbstractBackupPath path, Instant target) + throws BackupRestoreException; + + @Override + public String getShard() { + return getPrefix().getName(0).toString(); + } + + @Override + public Path getPrefix() { + Path prefix = Paths.get(configuration.getBackupPrefix()); + + if (StringUtils.isNotBlank(configuration.getRestorePrefix())) { + prefix = Paths.get(configuration.getRestorePrefix()); + } + + return prefix; + } + + @Override + public Iterator listPrefixes(Date date) { + String prefix = pathProvider.get().clusterPrefix(getPrefix().toString()); + Iterator fileIterator = listFileSystem(prefix, File.pathSeparator, null); + + //noinspection unchecked + return new TransformIterator( + fileIterator, + remotePath -> { + AbstractBackupPath abstractBackupPath = pathProvider.get(); + abstractBackupPath.parsePartialPrefix(remotePath.toString()); + return abstractBackupPath; + }); + } + + @Override + public Iterator list(String path, Date start, Date till) { + String prefix = pathProvider.get().remotePrefix(start, till, path); + Iterator fileIterator = listFileSystem(prefix, null, null); + + @SuppressWarnings("unchecked") + TransformIterator transformIterator = + new TransformIterator( + fileIterator, + remotePath -> { + AbstractBackupPath abstractBackupPath = pathProvider.get(); + abstractBackupPath.parseRemote(remotePath.toString()); + return abstractBackupPath; + }); + + return new FilterIterator<>( + transformIterator, + abstractBackupPath -> + (abstractBackupPath.getTime().after(start) + && abstractBackupPath.getTime().before(till)) + || abstractBackupPath.getTime().equals(start)); + } + + @Override + public final void addObserver(EventObserver observer) { + if (observer == null) throw new NullPointerException("observer must not be null."); + + observers.addIfAbsent(observer); + } + + @Override + public void removeObserver(EventObserver observer) { + if (observer == null) throw new NullPointerException("observer must not be null."); + + observers.remove(observer); + } + + @Override + public void notifyEventStart(BackupEvent event) { + observers.forEach(eventObserver -> eventObserver.updateEventStart(event)); + } + + 
@Override + public void notifyEventSuccess(BackupEvent event) { + observers.forEach(eventObserver -> eventObserver.updateEventSuccess(event)); + } + + @Override + public void notifyEventFailure(BackupEvent event) { + observers.forEach(eventObserver -> eventObserver.updateEventFailure(event)); + } + + @Override + public void notifyEventStop(BackupEvent event) { + observers.forEach(eventObserver -> eventObserver.updateEventStop(event)); + } + + @Override + public int getUploadTasksQueued() { + return tasksQueued.size(); + } + + @Override + public int getDownloadTasksQueued() { + return fileDownloadExecutor.getQueue().size(); + } + + @Override + public void clearCache() { + objectCache.invalidateAll(); + } +} diff --git a/priam/src/main/java/com/netflix/priam/backup/BackupDynamicRateLimiter.java b/priam/src/main/java/com/netflix/priam/backup/BackupDynamicRateLimiter.java new file mode 100644 index 000000000..4b301e321 --- /dev/null +++ b/priam/src/main/java/com/netflix/priam/backup/BackupDynamicRateLimiter.java @@ -0,0 +1,52 @@ +package com.netflix.priam.backup; + +import com.google.common.base.Preconditions; +import com.google.common.util.concurrent.RateLimiter; +import com.netflix.priam.config.IConfiguration; +import java.time.Clock; +import java.time.Duration; +import java.time.Instant; +import javax.inject.Inject; + +public class BackupDynamicRateLimiter implements DynamicRateLimiter { + + private final Clock clock; + private final IConfiguration config; + private final DirectorySize dirSize; + private final RateLimiter rateLimiter; + + @Inject + BackupDynamicRateLimiter(IConfiguration config, Clock clock, DirectorySize dirSize) { + this.clock = clock; + this.config = config; + this.dirSize = dirSize; + this.rateLimiter = RateLimiter.create(Double.MAX_VALUE); + } + + @Override + public void acquire(AbstractBackupPath path, Instant target, int permits) { + if (target.equals(Instant.EPOCH) + || !path.getBackupFile() + .getAbsolutePath() + .contains(AbstractBackup.SNAPSHOT_FOLDER)) { + return; + } + long secondsRemaining = Duration.between(clock.instant(), target).getSeconds(); + if (secondsRemaining < 1) { + // skip file system checks when unnecessary + return; + } + int backupThreads = config.getBackupThreads(); + Preconditions.checkState(backupThreads > 0); + long bytesPerThread = this.dirSize.getBytes(config.getDataFileLocation()) / backupThreads; + if (bytesPerThread < 1) { + return; + } + double newRate = (double) bytesPerThread / secondsRemaining; + double oldRate = rateLimiter.getRate(); + if ((Math.abs(newRate - oldRate) / oldRate) > config.getRateLimitChangeThreshold()) { + rateLimiter.setRate(newRate); + } + rateLimiter.acquire(permits); + } +} diff --git a/priam/src/main/java/com/netflix/priam/backup/BackupFileSystemAdapter.java b/priam/src/main/java/com/netflix/priam/backup/BackupFileSystemAdapter.java deleted file mode 100644 index 075677523..000000000 --- a/priam/src/main/java/com/netflix/priam/backup/BackupFileSystemAdapter.java +++ /dev/null @@ -1,48 +0,0 @@ -/** - * Copyright 2017 Netflix, Inc. - *
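To make the arithmetic in BackupDynamicRateLimiter above concrete: treating permits as bytes (which is what the acquire(permits) call implies), the target rate is bytesPerThread / secondsRemaining, and the underlying Guava RateLimiter is only re-created when the relative rate change crosses config.getRateLimitChangeThreshold(). A worked example with hypothetical numbers:

import com.google.common.util.concurrent.RateLimiter;

public class BackupRateSketch {
    public static void main(String[] args) {
        // Hypothetical inputs: 30 GiB of data, 4 backup threads, and 2 hours
        // remaining until the snapshot deadline.
        long bytesPerThread = (30L * 1024 * 1024 * 1024) / 4; // ~8 GiB per thread
        long secondsRemaining = 2 * 3600; // 7200 s
        double permitsPerSecond = (double) bytesPerThread / secondsRemaining;

        // ~1.07 MiB/s; each upload thread pays for its chunks in bytes.
        RateLimiter limiter = RateLimiter.create(permitsPerSecond);
        limiter.acquire(4096); // blocks long enough to cover a 4 KiB chunk
        System.out.printf("target rate: %.0f bytes/sec%n", permitsPerSecond);
    }
}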

- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.netflix.priam.backup; - -import java.io.InputStream; -import java.io.OutputStream; -import java.util.Date; -import java.util.Iterator; - -public abstract class BackupFileSystemAdapter implements IBackupFileSystem { - - public void download(AbstractBackupPath path, OutputStream os) throws BackupRestoreException { - } - - public void upload(AbstractBackupPath path, InputStream in) throws BackupRestoreException { - } - - public Iterator list(String path, Date start, Date till) { - return null; - } - - public Iterator listPrefixes(Date date) { - return null; - } - - public void cleanup() { - } - - public int getActivecount() { - return 0; - } - - public void shutdown() { - } -} diff --git a/priam/src/main/java/com/netflix/priam/backup/BackupFileSystemContext.java b/priam/src/main/java/com/netflix/priam/backup/BackupFileSystemContext.java index 0db3d72df..c8b319258 100755 --- a/priam/src/main/java/com/netflix/priam/backup/BackupFileSystemContext.java +++ b/priam/src/main/java/com/netflix/priam/backup/BackupFileSystemContext.java @@ -1,16 +1,14 @@ /** * Copyright 2017 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and + * + *

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + *

http://www.apache.org/licenses/LICENSE-2.0 + * + *

Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.priam.backup; @@ -24,7 +22,9 @@ public class BackupFileSystemContext implements IFileSystemContext { private IBackupFileSystem fs = null, encryptedFs = null; @Inject - public BackupFileSystemContext(@Named("backup") IBackupFileSystem fs, @Named("encryptedbackup") IBackupFileSystem encryptedFs) { + public BackupFileSystemContext( + @Named("backup") IBackupFileSystem fs, + @Named("encryptedbackup") IBackupFileSystem encryptedFs) { this.fs = fs; this.encryptedFs = encryptedFs; @@ -41,4 +41,4 @@ public IBackupFileSystem getFileStrategy(IConfiguration config) { return this.encryptedFs; } } -} \ No newline at end of file +} diff --git a/priam/src/main/java/com/netflix/priam/backup/BackupFolder.java b/priam/src/main/java/com/netflix/priam/backup/BackupFolder.java new file mode 100644 index 000000000..14a4cf6b1 --- /dev/null +++ b/priam/src/main/java/com/netflix/priam/backup/BackupFolder.java @@ -0,0 +1,18 @@ +package com.netflix.priam.backup; + +import java.util.Arrays; +import java.util.Optional; + +public enum BackupFolder { + SNAPSHOTS("snapshots"), + BACKUPS("backups"); + private String name; + + BackupFolder(String name) { + this.name = name; + } + + public static Optional fromName(String name) { + return Arrays.stream(values()).filter(b -> b.name.equals(name)).findFirst(); + } +} diff --git a/priam/src/main/java/com/netflix/priam/backup/BackupHelper.java b/priam/src/main/java/com/netflix/priam/backup/BackupHelper.java new file mode 100644 index 000000000..38a1e98b1 --- /dev/null +++ b/priam/src/main/java/com/netflix/priam/backup/BackupHelper.java @@ -0,0 +1,29 @@ +package com.netflix.priam.backup; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.inject.ImplementedBy; +import java.io.File; +import java.io.IOException; +import java.time.Instant; + +@ImplementedBy(BackupHelperImpl.class) +public interface BackupHelper { + + default ImmutableList> uploadAndDeleteAllFiles( + final File parent, final AbstractBackupPath.BackupFileType type, boolean async) + throws Exception { + return uploadAndDeleteAllFiles(parent, type, Instant.EPOCH, async); + } + + ImmutableList> uploadAndDeleteAllFiles( + final File parent, + final AbstractBackupPath.BackupFileType type, + Instant target, + boolean async) + throws Exception; + + ImmutableSet getBackupPaths( + File dir, AbstractBackupPath.BackupFileType type) throws IOException; +} diff --git a/priam/src/main/java/com/netflix/priam/backup/BackupHelperImpl.java b/priam/src/main/java/com/netflix/priam/backup/BackupHelperImpl.java new file mode 100644 index 000000000..c51de5e0a --- /dev/null +++ b/priam/src/main/java/com/netflix/priam/backup/BackupHelperImpl.java @@ -0,0 +1,112 @@ +package com.netflix.priam.backup; + +import static java.util.stream.Collectors.toSet; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.inject.Inject; +import com.google.inject.Provider; +import com.netflix.priam.compress.CompressionType; +import com.netflix.priam.config.BackupsToCompress; +import 
com.netflix.priam.config.IConfiguration; +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.time.Instant; +import java.util.Set; +import java.util.stream.Stream; + +public class BackupHelperImpl implements BackupHelper { + + private static final String COMPRESSION_SUFFIX = "-CompressionInfo.db"; + private static final String DATA_SUFFIX = "-Data.db"; + private final Provider pathFactory; + private final IBackupFileSystem fs; + private final IConfiguration config; + + @Inject + public BackupHelperImpl( + IConfiguration config, + IFileSystemContext backupFileSystemCtx, + Provider pathFactory) { + this.config = config; + this.pathFactory = pathFactory; + this.fs = backupFileSystemCtx.getFileStrategy(config); + } + + /** + * Upload files in the specified dir. Does not delete the file in case of error. The files are + * uploaded serially or async based on flag provided. + * + * @param parent Parent dir + * @param type Type of file (META, SST, SNAP etc) + * @param async Upload the file(s) in async fashion if enabled. + * @return List of files that are successfully uploaded as part of backup + * @throws Exception when there is failure in uploading files. + */ + @Override + public ImmutableList> uploadAndDeleteAllFiles( + final File parent, + final AbstractBackupPath.BackupFileType type, + Instant target, + boolean async) + throws Exception { + final ImmutableList.Builder> futures = + ImmutableList.builder(); + for (AbstractBackupPath bp : getBackupPaths(parent, type)) { + futures.add(fs.uploadAndDelete(bp, target, async)); + } + return futures.build(); + } + + @Override + public ImmutableSet getBackupPaths( + File dir, AbstractBackupPath.BackupFileType type) throws IOException { + Set files; + try (Stream pathStream = Files.list(dir.toPath())) { + files = pathStream.map(Path::toFile).filter(File::isFile).collect(toSet()); + } + Set compressedFilePrefixes = + files.stream() + .map(File::getName) + .filter(name -> name.endsWith(COMPRESSION_SUFFIX)) + .map(name -> name.substring(0, name.lastIndexOf('-'))) + .collect(toSet()); + final ImmutableSet.Builder bps = ImmutableSet.builder(); + ImmutableSet.Builder dataFiles = ImmutableSet.builder(); + for (File file : files) { + final AbstractBackupPath bp = pathFactory.get(); + bp.parseLocal(file, type); + bp.setCompression(getCorrectCompressionAlgorithm(bp, compressedFilePrefixes)); + (file.getAbsolutePath().endsWith(DATA_SUFFIX) ? dataFiles : bps).add(bp); + } + bps.addAll(dataFiles.build()); + return bps.build(); + } + + private CompressionType getCorrectCompressionAlgorithm( + AbstractBackupPath path, Set compressedFiles) { + if (!AbstractBackupPath.BackupFileType.isV2(path.getType()) + || path.getLastModified().toEpochMilli() + < config.getCompressionTransitionEpochMillis()) { + return CompressionType.SNAPPY; + } + String file = path.getFileName(); + BackupsToCompress which = config.getBackupsToCompress(); + switch (which) { + case NONE: + return CompressionType.NONE; + case ALL: + return CompressionType.SNAPPY; + case IF_REQUIRED: + int splitIndex = file.lastIndexOf('-'); + return splitIndex >= 0 && compressedFiles.contains(file.substring(0, splitIndex)) + ? CompressionType.NONE + : CompressionType.SNAPPY; + default: + throw new IllegalArgumentException("NONE, ALL, UNCOMPRESSED only. 
Saw: " + which); + } + } +} diff --git a/priam/src/main/java/com/netflix/priam/backup/BackupMetadata.java b/priam/src/main/java/com/netflix/priam/backup/BackupMetadata.java index e6101c796..295078721 100644 --- a/priam/src/main/java/com/netflix/priam/backup/BackupMetadata.java +++ b/priam/src/main/java/com/netflix/priam/backup/BackupMetadata.java @@ -1,52 +1,51 @@ /** * Copyright 2017 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and + * + *

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + *

http://www.apache.org/licenses/LICENSE-2.0 + * + *

Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.priam.backup; - import com.netflix.priam.utils.DateUtil; import com.netflix.priam.utils.GsonJsonSerializer; +import java.io.Serializable; +import java.util.Date; import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.Serializable; -import java.util.Date; - -/** - * POJO to encapsulate the metadata for a snapshot - * Created by aagrawal on 1/31/17. - */ - -final public class BackupMetadata implements Serializable { +/** POJO to encapsulate the metadata for a snapshot Created by aagrawal on 1/31/17. */ +public final class BackupMetadata implements Serializable { private static final Logger logger = LoggerFactory.getLogger(BackupMetadata.class); private String snapshotDate; private String token; private Date start, completed; private Status status; + private boolean cassandraSnapshotSuccess; + private Date lastValidated; + private BackupVersion backupVersion; private String snapshotLocation; - public BackupMetadata(String token, Date start) throws Exception { + public BackupMetadata(BackupVersion backupVersion, String token, Date start) throws Exception { if (start == null || token == null || StringUtils.isEmpty(token)) - throw new Exception(String.format("Invalid Input: Token: {} or start date:{} is null or empty.", token, start)); - + throw new Exception( + String.format( + "Invalid Input: Token: %s or start date: %s is null or empty.", + token, start)); + this.backupVersion = backupVersion; this.snapshotDate = DateUtil.formatyyyyMMdd(start); this.token = token; this.start = start; this.status = Status.STARTED; + this.cassandraSnapshotSuccess = false; } @Override @@ -56,9 +55,10 @@ public boolean equals(Object o) { BackupMetadata that = (BackupMetadata) o; - if (!this.snapshotDate.equals(that.snapshotDate)) return false; - if (!this.token.equals(that.token)) return false; - return this.start.equals(that.start); + return this.snapshotDate.equals(that.snapshotDate) + && this.token.equals(that.token) + && this.start.equals(that.start) + && this.backupVersion.equals(that.backupVersion); } @Override @@ -66,6 +66,7 @@ public int hashCode() { int result = this.snapshotDate.hashCode(); result = 31 * result + this.token.hashCode(); result = 31 * result + this.start.hashCode(); + result = 31 * result + this.backupVersion.hashCode(); return result; } @@ -101,7 +102,6 @@ public Date getStart() { * * @return completion date of snapshot. */ - public Date getCompleted() { return this.completed; } @@ -151,9 +151,56 @@ public void setSnapshotLocation(String snapshotLocation) { this.snapshotLocation = snapshotLocation; } + /** + * Find if cassandra snapshot was successful or not. This is a JMX operation and it is possible + * that this operation failed. + * + * @return cassandra snapshot status. + */ + public boolean isCassandraSnapshotSuccess() { + return cassandraSnapshotSuccess; + } + + /** + * Set the cassandra snapshot status to be either finished successfully or fail. + * + * @param cassandraSnapshotSuccess is set to success if JMX operation for snapshot is + * successful. 
+ */ + public void setCassandraSnapshotSuccess(boolean cassandraSnapshotSuccess) { + this.cassandraSnapshotSuccess = cassandraSnapshotSuccess; + } + + /** + * Get the backup version for the snapshot. + * + * @return backup version of the snapshot. + */ + public BackupVersion getBackupVersion() { + return backupVersion; + } + + /** + * Return the last validation timestamp of this backup metadata. Validation of backup implies + * finding if all the files are successfully stored in remote file system. + * + * @return date of last backup validation. + */ + public Date getLastValidated() { + return lastValidated; + } + + /** + * Set the last validation date of backup metadata. + * + * @param lastValidated date value of backup validation. + */ + public void setLastValidated(Date lastValidated) { + this.lastValidated = lastValidated; + } + @Override - public String toString() - { + public String toString() { return GsonJsonSerializer.getGson().toJson(this); } } diff --git a/priam/src/main/java/com/netflix/priam/backup/BackupRestoreException.java b/priam/src/main/java/com/netflix/priam/backup/BackupRestoreException.java index 8f5321154..356ad2d7a 100644 --- a/priam/src/main/java/com/netflix/priam/backup/BackupRestoreException.java +++ b/priam/src/main/java/com/netflix/priam/backup/BackupRestoreException.java @@ -27,5 +27,4 @@ public BackupRestoreException(String message) { public BackupRestoreException(String message, Exception e) { super(message, e); } - } diff --git a/priam/src/main/java/com/netflix/priam/backup/BackupRestoreUtil.java b/priam/src/main/java/com/netflix/priam/backup/BackupRestoreUtil.java index 37fcc9179..2293a2694 100644 --- a/priam/src/main/java/com/netflix/priam/backup/BackupRestoreUtil.java +++ b/priam/src/main/java/com/netflix/priam/backup/BackupRestoreUtil.java @@ -19,179 +19,169 @@ import com.google.common.collect.ImmutableMap; import com.google.inject.Inject; +import com.google.inject.Provider; +import com.netflix.priam.backupv2.IMetaProxy; +import com.netflix.priam.backupv2.MetaV2Proxy; +import com.netflix.priam.utils.DateUtil; +import java.nio.file.Path; +import java.time.Instant; +import java.util.*; +import java.util.regex.Pattern; +import java.util.stream.Collectors; +import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.*; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -/** - * Created by aagrawal on 8/14/17. - */ +/** Created by aagrawal on 8/14/17. 
*/ public class BackupRestoreUtil { private static final Logger logger = LoggerFactory.getLogger(BackupRestoreUtil.class); - private static final String JOBNAME = "BackupRestoreUtil"; - - private final Map> columnFamilyFilter = new HashMap<>(); //key: keyspace, value: a list of CFs within the keyspace - private final Map keyspaceFilter = new HashMap<>(); //key: keyspace, value: null + private static final Pattern columnFamilyFilterPattern = Pattern.compile(".\\.."); + private Map> includeFilter; + private Map> excludeFilter; - private final Pattern columnFamilyFilterPattern = Pattern.compile(".\\.."); - private String configKeyspaceFilter; - private String configColumnfamilyFilter; - - public static final List FILTER_KEYSPACE = Arrays.asList("OpsCenter"); - private static final Map> FILTER_COLUMN_FAMILY = ImmutableMap.of("system", Arrays.asList("local", "peers", "compactions_in_progress", "LocationInfo")); + public static final List FILTER_KEYSPACE = Collections.singletonList("OpsCenter"); + private static final Map> FILTER_COLUMN_FAMILY = + ImmutableMap.of( + "system", + Arrays.asList( + "local", "peers", "hints", "compactions_in_progress", "LocationInfo")); @Inject - public BackupRestoreUtil(String configKeyspaceFilter, String configColumnfamilyFilter) { - setFilters(configKeyspaceFilter, configColumnfamilyFilter); + public BackupRestoreUtil(String configIncludeFilter, String configExcludeFilter) { + setFilters(configIncludeFilter, configExcludeFilter); } - private BackupRestoreUtil setFilters(String configKeyspaceFilter, String configColumnfamilyFilter) { - this.configColumnfamilyFilter = configColumnfamilyFilter; - this.configKeyspaceFilter = configKeyspaceFilter; - populateFilters(); + public BackupRestoreUtil setFilters(String configIncludeFilter, String configExcludeFilter) { + includeFilter = getFilter(configIncludeFilter); + excludeFilter = getFilter(configExcludeFilter); + logger.info("Exclude filter set: {}", configExcludeFilter); + logger.info("Include filter set: {}", configIncludeFilter); return this; } - /** - * Search for "1:* alphanumeric chars including special chars""literal period"" 1:* alphanumeric chars including special chars" - * - * @param cfFilter input string - * @return true if input string matches search pattern; otherwise, false - */ - private final boolean isValidCFFilterFormat(String cfFilter) { - return columnFamilyFilterPattern.matcher(cfFilter).find(); - } + public static Optional getLatestValidMetaPath( + IMetaProxy metaProxy, DateUtil.DateRange dateRange) { + // Get a list of manifest files. + List metas = metaProxy.findMetaFiles(dateRange); - /** - * Populate the filters for backup/restore as configured for internal use. - */ - private final void populateFilters() { - //Clear the filters as we will (re)populate the filters. - keyspaceFilter.clear(); - columnFamilyFilter.clear(); - - if (configKeyspaceFilter == null || configKeyspaceFilter.isEmpty()) { - logger.info("No keyspace filter set for {}.", JOBNAME); - } else { - String[] keyspaces = configKeyspaceFilter.split(","); - for (int i = 0; i < keyspaces.length; i++) { - logger.info("Adding {} keyspace filter: {}", JOBNAME, keyspaces[i]); - this.keyspaceFilter.put(keyspaces[i], null); + // Find a valid manifest file. 
+ for (AbstractBackupPath meta : metas) { + BackupVerificationResult result = metaProxy.isMetaFileValid(meta); + if (result.valid) { + return Optional.of(meta); } - } - if (configColumnfamilyFilter == null || configColumnfamilyFilter.isEmpty()) { - - logger.info("No column family filter set for {}.", JOBNAME); - - } else { + return Optional.empty(); + } - String[] filters = configColumnfamilyFilter.split(","); - for (int i = 0; i < filters.length; i++) { //process each filter - if (isValidCFFilterFormat(filters[i])) { + public static List getAllFiles( + AbstractBackupPath latestValidMetaFile, + DateUtil.DateRange dateRange, + IMetaProxy metaProxy, + Provider pathProvider) + throws Exception { + // Download the meta.json file. + Path metaFile = metaProxy.downloadMetaFile(latestValidMetaFile); + // Parse meta.json file to find the files required to download from this snapshot. + List allFiles = + metaProxy + .getSSTFilesFromMeta(metaFile) + .stream() + .map( + value -> { + AbstractBackupPath path = pathProvider.get(); + path.parseRemote(value); + return path; + }) + .collect(Collectors.toList()); + + FileUtils.deleteQuietly(metaFile.toFile()); + + // Download incremental SSTables after the snapshot meta file. + Instant snapshotTime; + if (metaProxy instanceof MetaV2Proxy) snapshotTime = latestValidMetaFile.getLastModified(); + else snapshotTime = latestValidMetaFile.getTime().toInstant(); + + DateUtil.DateRange incrementalDateRange = + new DateUtil.DateRange(snapshotTime, dateRange.getEndTime()); + Iterator incremental = metaProxy.getIncrementals(incrementalDateRange); + while (incremental.hasNext()) allFiles.add(incremental.next()); + + return allFiles; + } - String[] filter = filters[i].split("\\."); - String ksName = filter[0]; - String cfName = filter[1]; - logger.info("Adding {} CF filter: {}.{}", JOBNAME, ksName, cfName); + public static final Map> getFilter(String inputFilter) + throws IllegalArgumentException { + if (StringUtils.isEmpty(inputFilter)) return null; - if (this.columnFamilyFilter.containsKey(ksName)) { - //add cf to existing filter - List columnfamilies = this.columnFamilyFilter.get(ksName); - columnfamilies.add(cfName); - this.columnFamilyFilter.put(ksName, columnfamilies); + final Map> columnFamilyFilter = + new HashMap<>(); // key: keyspace, value: a list of CFs within the keyspace - } else { + String[] filters = inputFilter.split(","); + for (String cfFilter : + filters) { // process filter of form keyspace.* or keyspace.columnfamily + if (columnFamilyFilterPattern.matcher(cfFilter).find()) { - List cfs = new ArrayList(); - cfs.add(cfName); - this.columnFamilyFilter.put(ksName, cfs); + String[] filter = cfFilter.split("\\."); + String keyspaceName = filter[0]; + String columnFamilyName = filter[1]; - } + if (columnFamilyName.contains("-")) + columnFamilyName = columnFamilyName.substring(0, columnFamilyName.indexOf("-")); - } else { - throw new IllegalArgumentException("Column family filter format is not valid. Format needs to be \"keyspace.columnfamily\". Invalid input: " + filters[i]); - } - } //end processing each filter + List existingCfs = + columnFamilyFilter.getOrDefault(keyspaceName, new ArrayList<>()); + if (!columnFamilyName.equalsIgnoreCase("*")) existingCfs.add(columnFamilyName); + columnFamilyFilter.put(keyspaceName, existingCfs); + } else { + throw new IllegalArgumentException( + "Column family filter format is not valid. Format needs to be \"keyspace.columnfamily\". 
Invalid input: " + + cfFilter); + } } + return columnFamilyFilter; } /** * Returns if provided keyspace and/or columnfamily is filtered for backup or restore. + * * @param keyspace name of the keyspace in consideration * @param columnFamilyDir name of the columnfamily directory in consideration * @return true if directory should be filter from processing; otherwise, false. */ - public final boolean isFiltered(String keyspace, String columnFamilyDir){ - if (StringUtils.isEmpty(keyspace) || StringUtils.isEmpty(columnFamilyDir)) - return false; + public final boolean isFiltered(String keyspace, String columnFamilyDir) { + if (StringUtils.isEmpty(keyspace) || StringUtils.isEmpty(columnFamilyDir)) return false; String columnFamilyName = columnFamilyDir.split("-")[0]; - //column family is in list of global CF filter - if (FILTER_COLUMN_FAMILY.containsKey(keyspace) && FILTER_COLUMN_FAMILY.get(keyspace).contains(columnFamilyName)) - return true; - - if (isFiltered(BackupRestoreUtil.DIRECTORYTYPE.KEYSPACE, keyspace) || //keyspace is filtered - isFiltered(BackupRestoreUtil.DIRECTORYTYPE.CF, keyspace, columnFamilyDir) //columnfamily is filtered - ) { - logger.debug("Skipping: keyspace: {}, CF: {} is part of filter list.", keyspace, columnFamilyName); - return true; - } - - return false; - } - - /** - * @param directoryType keyspace or columnfamily directory type. - * @return true if directory should be filter from processing; otherwise, false. - */ - private final boolean isFiltered(DIRECTORYTYPE directoryType, String... args) { - - if (directoryType.equals(DIRECTORYTYPE.KEYSPACE)) { //start with filtering the parent (keyspace) - //Apply each keyspace filter to input string - String keyspaceName = args[0]; - - java.util.Set ksFilters = keyspaceFilter.keySet(); - Iterator it = ksFilters.iterator(); - while (it.hasNext()) { - String ksFilter = it.next(); - Pattern pattern = Pattern.compile(ksFilter); - Matcher matcher = pattern.matcher(keyspaceName); - if (matcher.find()) { - logger.debug("Keyspace: {} matched filter: {}", keyspaceName, ksFilter); - return true; - } + // column family is in list of global CF filter + if (FILTER_COLUMN_FAMILY.containsKey(keyspace) + && FILTER_COLUMN_FAMILY.get(keyspace).contains(columnFamilyName)) return true; + + if (excludeFilter != null) + if (excludeFilter.containsKey(keyspace) + && (excludeFilter.get(keyspace).isEmpty() + || excludeFilter.get(keyspace).contains(columnFamilyName))) { + logger.debug( + "Skipping: keyspace: {}, CF: {} is part of exclude list.", + keyspace, + columnFamilyName); + return true; } - } - if (directoryType.equals(DIRECTORYTYPE.CF)) { //parent (keyspace) is not filtered, now see if the child (CF) is filtered - String keyspaceName = args[0]; - if (!columnFamilyFilter.containsKey(keyspaceName)) { - return false; + if (includeFilter != null) + if (!(includeFilter.containsKey(keyspace) + && (includeFilter.get(keyspace).isEmpty() + || includeFilter.get(keyspace).contains(columnFamilyName)))) { + logger.debug( + "Skipping: keyspace: {}, CF: {} is not part of include list.", + keyspace, + columnFamilyName); + return true; } - String cfName = args[1]; - List cfsFilter = columnFamilyFilter.get(keyspaceName); - for (int i = 0; i < cfsFilter.size(); i++) { - Pattern pattern = Pattern.compile(cfsFilter.get(i)); - Matcher matcher = pattern.matcher(cfName); - if (matcher.find()) { - logger.debug("{}.{} matched filter", keyspaceName, cfName); - return true; - } - } - } - - return false; //if here, current input are not part of keyspae and cf filters - } - 
- public enum DIRECTORYTYPE { - KEYSPACE, CF + return false; } } diff --git a/priam/src/main/java/com/netflix/priam/backup/BackupService.java b/priam/src/main/java/com/netflix/priam/backup/BackupService.java new file mode 100644 index 000000000..64c750f8d --- /dev/null +++ b/priam/src/main/java/com/netflix/priam/backup/BackupService.java @@ -0,0 +1,85 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.priam.backup; + +import com.google.inject.Inject; +import com.netflix.priam.aws.UpdateCleanupPolicy; +import com.netflix.priam.config.IBackupRestoreConfig; +import com.netflix.priam.config.IConfiguration; +import com.netflix.priam.defaultimpl.IService; +import com.netflix.priam.scheduler.PriamScheduler; +import com.netflix.priam.scheduler.TaskTimer; +import com.netflix.priam.tuner.CassandraTunerService; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Encapsulate the backup service 1.0 - Execute all the tasks required to run backup service. + * + *

Created by aagrawal on 3/9/19. + */ +public class BackupService implements IService { + private final PriamScheduler scheduler; + private final IConfiguration config; + private final IBackupRestoreConfig backupRestoreConfig; + private final CassandraTunerService cassandraTunerService; + private static final Logger logger = LoggerFactory.getLogger(BackupService.class); + + @Inject + public BackupService( + IConfiguration config, + IBackupRestoreConfig backupRestoreConfig, + PriamScheduler priamScheduler, + CassandraTunerService cassandraTunerService) { + this.config = config; + this.backupRestoreConfig = backupRestoreConfig; + this.scheduler = priamScheduler; + this.cassandraTunerService = cassandraTunerService; + } + + @Override + public void scheduleService() throws Exception { + // Start the snapshot backup schedule - Always run this. (If you want to + // set it off, set backup hour to -1) or set backup cron to "-1" + TaskTimer snapshotTimer = SnapshotBackup.getTimer(config); + scheduleTask(scheduler, SnapshotBackup.class, snapshotTimer); + + if (snapshotTimer != null) { + // Set cleanup + scheduleTask(scheduler, UpdateCleanupPolicy.class, UpdateCleanupPolicy.getTimer()); + // Schedule commit log task + scheduleTask( + scheduler, CommitLogBackupTask.class, CommitLogBackupTask.getTimer(config)); + } + + // Start the Incremental backup schedule if enabled + scheduleTask( + scheduler, + IncrementalBackup.class, + IncrementalBackup.getTimer(config, backupRestoreConfig)); + } + + @Override + public void updateServicePre() throws Exception { + // Run the task to tune Cassandra + cassandraTunerService.onChangeUpdateService(); + } + + @Override + public void updateServicePost() throws Exception {} +} diff --git a/priam/src/main/java/com/netflix/priam/backup/BackupStatusMgr.java b/priam/src/main/java/com/netflix/priam/backup/BackupStatusMgr.java index f645901a3..36489e022 100644 --- a/priam/src/main/java/com/netflix/priam/backup/BackupStatusMgr.java +++ b/priam/src/main/java/com/netflix/priam/backup/BackupStatusMgr.java @@ -1,16 +1,14 @@ /** * Copyright 2017 Netflix, Inc. - *
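Reviewer note on the `BackupRestoreUtil` rewrite above: the regex-based keyspace filter gives way to symmetric include/exclude maps built by `getFilter`, where a `ks.*` entry yields an empty list meaning "the whole keyspace". A self-contained sketch of that parsing under the same `keyspace.columnfamily` / `keyspace.*` grammar (class and method names here are illustrative, not Priam's):

```java
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.regex.Pattern;

public class FilterParser {
    // Matches "<char(s)>.<char(s)>", e.g. "ks1.cf1" or "ks1.*".
    private static final Pattern CF_FILTER = Pattern.compile(".\\..");

    // key: keyspace; value: column families to match; an empty list
    // (produced by the "ks.*" form) means "the whole keyspace".
    static Map<String, List<String>> parse(String input) {
        if (input == null || input.isEmpty()) return null;
        Map<String, List<String>> filter = new HashMap<>();
        for (String token : input.split(",")) {
            if (!CF_FILTER.matcher(token).find())
                throw new IllegalArgumentException(
                        "Expected \"keyspace.columnfamily\", got: " + token);
            String[] parts = token.split("\\.");
            String keyspace = parts[0];
            String cf = parts[1];
            // Strip a trailing directory suffix such as "cf1-ka-1".
            if (cf.contains("-")) cf = cf.substring(0, cf.indexOf('-'));
            List<String> cfs = filter.getOrDefault(keyspace, new ArrayList<>());
            if (!cf.equalsIgnoreCase("*")) cfs.add(cf);
            filter.put(keyspace, cfs);
        }
        return filter;
    }

    public static void main(String[] args) {
        // {ks1=[cf1, cf2], ks2=[]} -- ks2 matches every column family.
        System.out.println(parse("ks1.cf1,ks1.cf2,ks2.*"));
    }
}
```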

- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and + * + *

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + *

http://www.apache.org/licenses/LICENSE-2.0 + * + *

Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.priam.backup; @@ -20,12 +18,14 @@ import com.netflix.priam.health.InstanceState; import com.netflix.priam.utils.DateUtil; import com.netflix.priam.utils.MaxSizeHashMap; +import java.time.Instant; +import java.time.temporal.ChronoUnit; +import java.util.*; +import java.util.stream.Collectors; import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.*; - /* * A means to manage metadata for various types of backups (snapshots, incrementals) */ @@ -35,17 +35,19 @@ public abstract class BackupStatusMgr implements IBackupStatusMgr { private static final Logger logger = LoggerFactory.getLogger(BackupStatusMgr.class); /** - * Map: Map of completed snapshots represented by its snapshot day (yyyymmdd) - * and a list of snapshots started on that day - * Note: A {@link LinkedList} was chosen for fastest retrieval of latest snapshot. + * Map: Map of completed snapshots represented by its + * snapshot day (yyyymmdd) and a list of snapshots started on that day Note: A {@link + * LinkedList} was chosen for fastest retrieval of latest snapshot. */ Map> backupMetadataMap; - int capacity; - private InstanceState instanceState; + + final int capacity; + private final InstanceState instanceState; /** * @param capacity Capacity to hold in-memory snapshot status days. - * @param instanceState Status of the instance encapsulating health and other metadata of Priam and Cassandra. + * @param instanceState Status of the instance encapsulating health and other metadata of Priam + * and Cassandra. */ @Inject public BackupStatusMgr(int capacity, InstanceState instanceState) { @@ -73,16 +75,14 @@ public LinkedList locate(Date snapshotDate) { @Override public LinkedList locate(String snapshotDate) { - if (StringUtils.isEmpty(snapshotDate)) - return null; + if (StringUtils.isEmpty(snapshotDate)) return null; // See if in memory - if (backupMetadataMap.containsKey(snapshotDate)) - return backupMetadataMap.get(snapshotDate); + if (backupMetadataMap.containsKey(snapshotDate)) return backupMetadataMap.get(snapshotDate); LinkedList metadataLinkedList = fetch(snapshotDate); - //Save the result in local cache so we don't hit data store/file. + // Save the result in local cache so we don't hit data store/file. backupMetadataMap.put(snapshotDate, metadataLinkedList); return metadataLinkedList; @@ -99,64 +99,75 @@ public void start(BackupMetadata backupMetadata) { metadataLinkedList.addFirst(backupMetadata); backupMetadataMap.put(backupMetadata.getSnapshotDate(), metadataLinkedList); instanceState.setBackupStatus(backupMetadata); - //Save the backupMetaDataMap + // Save the backupMetaDataMap save(backupMetadata); } @Override public void finish(BackupMetadata backupMetadata) { - //validate that it has actually finished. If not, then set the status and current date. + // validate that it has actually finished. If not, then set the status and current date. 
if (backupMetadata.getStatus() != Status.FINISHED) backupMetadata.setStatus(Status.FINISHED); if (backupMetadata.getCompleted() == null) - backupMetadata.setCompleted(Calendar.getInstance(TimeZone.getTimeZone("GMT")).getTime()); + backupMetadata.setCompleted( + Calendar.getInstance(TimeZone.getTimeZone("GMT")).getTime()); instanceState.setBackupStatus(backupMetadata); + update(backupMetadata); + } - //Retrieve the snapshot metadata and then update the finish date/status. + @Override + public void update(BackupMetadata backupMetadata) { + // Retrieve the snapshot metadata and then update the finish date/status. retrieveAndUpdate(backupMetadata); - //Save the backupMetaDataMap + // Save the backupMetaDataMap save(backupMetadata); - } private void retrieveAndUpdate(final BackupMetadata backupMetadata) { - //Retrieve the snapshot metadata and then update the date/status. + // Retrieve the snapshot metadata and then update the date/status. LinkedList metadataLinkedList = locate(backupMetadata.getSnapshotDate()); - if (metadataLinkedList == null || metadataLinkedList.isEmpty()) { - logger.error("No previous backupMetaData found. This should not happen. Creating new to ensure app keeps running."); + if (metadataLinkedList == null) { + logger.error( + "No previous backupMetaData found. This should not happen. Creating new to ensure app keeps running."); metadataLinkedList = new LinkedList<>(); - metadataLinkedList.addFirst(backupMetadata); + backupMetadataMap.put(backupMetadata.getSnapshotDate(), metadataLinkedList); } - metadataLinkedList.forEach(backupMetadata1 -> { - if (backupMetadata1.equals(backupMetadata)) { - backupMetadata1.setCompleted(backupMetadata.getCompleted()); - backupMetadata1.setStatus(backupMetadata.getStatus()); - } - }); + Optional searchedData = + metadataLinkedList + .stream() + .filter(backupMetadata1 -> backupMetadata.equals(backupMetadata1)) + .findFirst(); + if (!searchedData.isPresent()) { + metadataLinkedList.addFirst(backupMetadata); + } + searchedData.ifPresent( + backupMetadata1 -> { + backupMetadata1.setCompleted(backupMetadata.getCompleted()); + backupMetadata1.setStatus(backupMetadata.getStatus()); + backupMetadata1.setCassandraSnapshotSuccess( + backupMetadata.isCassandraSnapshotSuccess()); + backupMetadata1.setSnapshotLocation(backupMetadata.getSnapshotLocation()); + backupMetadata1.setLastValidated(backupMetadata.getLastValidated()); + }); } @Override public void failed(BackupMetadata backupMetadata) { - //validate that it has actually failed. If not, then set the status and current date. + // validate that it has actually failed. If not, then set the status and current date. if (backupMetadata.getCompleted() == null) - backupMetadata.setCompleted(Calendar.getInstance(TimeZone.getTimeZone("GMT")).getTime()); + backupMetadata.setCompleted( + Calendar.getInstance(TimeZone.getTimeZone("GMT")).getTime()); - //Set this later to ensure the status - if (backupMetadata.getStatus() != Status.FAILED) - backupMetadata.setStatus(Status.FAILED); + // Set this later to ensure the status + if (backupMetadata.getStatus() != Status.FAILED) backupMetadata.setStatus(Status.FAILED); instanceState.setBackupStatus(backupMetadata); - - //Retrieve the snapshot metadata and then update the failure date/status. 
- retrieveAndUpdate(backupMetadata); - - //Save the backupMetaDataMap - save(backupMetadata); + update(backupMetadata); } /** @@ -170,16 +181,59 @@ public void failed(BackupMetadata backupMetadata) { * Implementation on how to retrieve the backup metadata(s) for a given date from store. * * @param snapshotDate Snapshot date to be retrieved from datastore in format of yyyyMMdd - * @return The list of snapshots started on the snapshot day in descending order of snapshot start time. + * @return The list of snapshots started on the snapshot day in descending order of snapshot + * start time. */ protected abstract LinkedList fetch(String snapshotDate); + public List getLatestBackupMetadata( + BackupVersion backupVersion, DateUtil.DateRange dateRange) { + Instant startDay = dateRange.getStartTime().truncatedTo(ChronoUnit.DAYS); + Instant endDay = dateRange.getEndTime().truncatedTo(ChronoUnit.DAYS); + + List allBackups = new ArrayList<>(); + Instant previousDay = endDay; + do { + // We need to find the latest backupmetadata in this date range. + logger.info( + "Will try to find snapshot for : {}", + DateUtil.formatInstant(DateUtil.yyyyMMddHHmm, previousDay)); + List backupsForDate = locate(new Date(previousDay.toEpochMilli())); + if (backupsForDate != null) allBackups.addAll(backupsForDate); + previousDay = previousDay.minus(1, ChronoUnit.DAYS); + } while (!previousDay.isBefore(startDay)); + + // Return all the backups which are FINISHED and were "started" in the dateRange provided. + // Do not compare the end time of snapshot as it may take random amount of time to finish + // the snapshot. + return allBackups + .stream() + .filter(Objects::nonNull) + .filter(backupMetadata -> backupMetadata.getStatus() == Status.FINISHED) + .filter(backupMetadata -> backupMetadata.getBackupVersion().equals(backupVersion)) + .filter( + backupMetadata -> + backupMetadata + .getStart() + .toInstant() + .compareTo(dateRange.getStartTime()) + >= 0 + && backupMetadata + .getStart() + .toInstant() + .compareTo(dateRange.getEndTime()) + <= 0) + .sorted(Comparator.comparing(BackupMetadata::getStart).reversed()) + .collect(Collectors.toList()); + } + @Override public String toString() { - final StringBuffer sb = new StringBuffer("BackupStatusMgr{"); - sb.append("backupMetadataMap=").append(backupMetadataMap); - sb.append(", capacity=").append(capacity); - sb.append('}'); - return sb.toString(); + return "BackupStatusMgr{" + + "backupMetadataMap=" + + backupMetadataMap + + ", capacity=" + + capacity + + '}'; } } diff --git a/priam/src/main/java/com/netflix/priam/backup/BackupVerification.java b/priam/src/main/java/com/netflix/priam/backup/BackupVerification.java index 03c896f3e..3dfc1bdfb 100644 --- a/priam/src/main/java/com/netflix/priam/backup/BackupVerification.java +++ b/priam/src/main/java/com/netflix/priam/backup/BackupVerification.java @@ -1,152 +1,165 @@ /** * Copyright 2017 Netflix, Inc. - *
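Reviewer note on the new `getLatestBackupMetadata` above: it walks the day-keyed status cache backwards from the end of the range, then stream-filters to FINISHED snapshots that *started* inside the range (end times are ignored, since a snapshot can take an arbitrary time to complete). A compact sketch of the same walk-then-filter shape using a trimmed-down snapshot record instead of `BackupMetadata` (hypothetical names, Java 8 time API as in the diff):

```java
import java.time.Instant;
import java.time.temporal.ChronoUnit;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class LatestBackupFinder {
    static class Snapshot {
        final Instant start;
        final boolean finished;
        Snapshot(Instant start, boolean finished) { this.start = start; this.finished = finished; }
        @Override public String toString() { return start + (finished ? "/FINISHED" : "/FAILED"); }
    }

    // Collect snapshots day by day, newest day first, then keep only FINISHED
    // ones whose *start* falls inside [start, end], sorted newest-first.
    static List<Snapshot> latestFirst(Map<Instant, List<Snapshot>> byDay, Instant start, Instant end) {
        List<Snapshot> all = new ArrayList<>();
        Instant firstDay = start.truncatedTo(ChronoUnit.DAYS);
        Instant day = end.truncatedTo(ChronoUnit.DAYS);
        do {
            all.addAll(byDay.getOrDefault(day, Collections.emptyList()));
            day = day.minus(1, ChronoUnit.DAYS);
        } while (!day.isBefore(firstDay));
        return all.stream()
                .filter(s -> s.finished)
                .filter(s -> !s.start.isBefore(start) && !s.start.isAfter(end))
                .sorted(Comparator.comparing((Snapshot s) -> s.start).reversed())
                .collect(Collectors.toList());
    }

    public static void main(String[] args) {
        Instant end = Instant.parse("2019-03-10T12:00:00Z");
        Instant start = end.minus(2, ChronoUnit.DAYS);
        Map<Instant, List<Snapshot>> byDay = new HashMap<>();
        byDay.put(end.truncatedTo(ChronoUnit.DAYS),
                Collections.singletonList(new Snapshot(end.minus(6, ChronoUnit.HOURS), true)));
        byDay.put(start.truncatedTo(ChronoUnit.DAYS),
                Collections.singletonList(new Snapshot(start.plus(1, ChronoUnit.HOURS), false)));
        System.out.println(latestFirst(byDay, start, end)); // only the FINISHED one survives
    }
}
```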

- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and + * + *

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + *

http://www.apache.org/licenses/LICENSE-2.0 + * + *

Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.priam.backup; import com.google.inject.Inject; +import com.google.inject.Provider; import com.google.inject.Singleton; import com.google.inject.name.Named; -import com.netflix.priam.config.IConfiguration; +import com.netflix.priam.backupv2.IMetaProxy; +import com.netflix.priam.scheduler.UnsupportedTypeException; import com.netflix.priam.utils.DateUtil; -import org.apache.commons.collections4.CollectionUtils; -import org.json.simple.parser.JSONParser; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.FileOutputStream; -import java.io.FileReader; -import java.nio.file.FileSystems; +import com.netflix.priam.utils.DateUtil.DateRange; import java.nio.file.Path; +import java.nio.file.Paths; +import java.time.Instant; import java.util.*; -import java.util.stream.Collectors; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * Created by aagrawal on 2/16/17. - * This class validates the backup by doing listing of files in the backup destination and comparing with meta.json by downloading from the location. - * Input: BackupMetadata that needs to be verified. - * Since one backupmetadata can have multiple start time, provide one startTime if interested in verifying one particular backup. - * Leave startTime as null to get the latest snapshot for the provided BackupMetadata. + * Created by aagrawal on 2/16/17. This class validates the backup by doing listing of files in the + * backup destination and comparing with meta.json by downloading from the location. Input: + * BackupMetadata that needs to be verified. */ @Singleton public class BackupVerification { private static final Logger logger = LoggerFactory.getLogger(BackupVerification.class); - private IBackupFileSystem bkpStatusFs; - private IConfiguration config; + private final IMetaProxy metaV1Proxy; + private final IMetaProxy metaV2Proxy; + private final IBackupStatusMgr backupStatusMgr; + private final Provider abstractBackupPathProvider; + private BackupVerificationResult latestResult; @Inject - BackupVerification(@Named("backup_status") IBackupFileSystem bkpStatusFs, IConfiguration config) { - this.bkpStatusFs = bkpStatusFs; - this.config = config; + BackupVerification( + @Named("v1") IMetaProxy metaV1Proxy, + @Named("v2") IMetaProxy metaV2Proxy, + IBackupStatusMgr backupStatusMgr, + Provider abstractBackupPathProvider) { + this.metaV1Proxy = metaV1Proxy; + this.metaV2Proxy = metaV2Proxy; + this.backupStatusMgr = backupStatusMgr; + this.abstractBackupPathProvider = abstractBackupPathProvider; } - public BackupVerificationResult verifyBackup(List metadata, Date startTime) { - BackupVerificationResult result = new BackupVerificationResult(); + public IMetaProxy getMetaProxy(BackupVersion backupVersion) { + switch (backupVersion) { + case SNAPSHOT_BACKUP: + return metaV1Proxy; + case SNAPSHOT_META_SERVICE: + return metaV2Proxy; + } - if (metadata == null || metadata.isEmpty()) - return result; + return null; + } - result.snapshotAvailable = true; - // All the dates should be same. 
- result.selectedDate = metadata.get(0).getSnapshotDate(); + public Optional verifyBackup( + BackupVersion backupVersion, boolean force, DateRange dateRange) + throws UnsupportedTypeException, IllegalArgumentException { + IMetaProxy metaProxy = getMetaProxy(backupVersion); + if (metaProxy == null) { + throw new UnsupportedTypeException( + "BackupVersion type: " + backupVersion + " is not supported"); + } - List backups = metadata.stream().map(backupMetadata -> - DateUtil.formatyyyyMMddHHmm(backupMetadata.getStart())).collect(Collectors.toList()); - logger.info("Snapshots found for {} : [{}]", result.selectedDate, backups); + if (dateRange == null) { + throw new IllegalArgumentException("dateRange provided is null"); + } - //find the latest date (default) or verify if one provided - Date latestDate = null; + List metadata = + backupStatusMgr.getLatestBackupMetadata(backupVersion, dateRange); + if (metadata == null || metadata.isEmpty()) return Optional.empty(); for (BackupMetadata backupMetadata : metadata) { - if (latestDate == null || latestDate.before(backupMetadata.getStart())) - latestDate = backupMetadata.getStart(); - - if (startTime != null && - DateUtil.formatyyyyMMddHHmm(backupMetadata.getStart()).equals(DateUtil.formatyyyyMMddHHmm(startTime))) { - latestDate = startTime; - break; + if (backupMetadata.getLastValidated() != null && !force) { + // Backup is already validated. Nothing to do. + latestResult = new BackupVerificationResult(); + latestResult.valid = true; + latestResult.manifestAvailable = true; + latestResult.snapshotInstant = backupMetadata.getStart().toInstant(); + Path snapshotLocation = Paths.get(backupMetadata.getSnapshotLocation()); + latestResult.remotePath = + snapshotLocation.subpath(1, snapshotLocation.getNameCount()).toString(); + return Optional.of(latestResult); + } + BackupVerificationResult backupVerificationResult = + verifyBackup(metaProxy, backupMetadata); + if (logger.isDebugEnabled()) + logger.debug( + "BackupVerification: metadata: {}, result: {}", + backupMetadata, + backupVerificationResult); + if (backupVerificationResult.valid) { + backupMetadata.setLastValidated(new Date(DateUtil.getInstant().toEpochMilli())); + backupStatusMgr.update(backupMetadata); + latestResult = backupVerificationResult; + return Optional.of(backupVerificationResult); } } + latestResult = null; + return Optional.empty(); + } - result.snapshotTime = DateUtil.formatyyyyMMddHHmm(latestDate); - logger.info("Latest/Requested snapshot date found: {}, for selected/provided date: {}", result.snapshotTime, result.selectedDate); - - //Get Backup File Iterator - String prefix = config.getBackupPrefix(); - logger.info("Looking for meta file in the location: {}", prefix); - - Date strippedMsSnapshotTime = DateUtil.getDate(result.snapshotTime); - Iterator backupfiles = bkpStatusFs.list(prefix, strippedMsSnapshotTime, strippedMsSnapshotTime); - //Return validation fail if backup filesystem listing failed. - if (!backupfiles.hasNext()) { - logger.warn("ERROR: No files available while doing backup filesystem listing. 
Declaring the verification failed."); - return result; - } - - result.backupFileListAvail = true; - - List metas = new LinkedList<>(); - List s3Listing = new ArrayList<>(); - - while (backupfiles.hasNext()) { - AbstractBackupPath path = backupfiles.next(); - if (path.getFileName().equalsIgnoreCase("meta.json")) - metas.add(path); - else - s3Listing.add(path.getRemotePath()); + public List verifyAllBackups( + BackupVersion backupVersion, DateRange dateRange) + throws UnsupportedTypeException, IllegalArgumentException { + IMetaProxy metaProxy = getMetaProxy(backupVersion); + if (metaProxy == null) { + throw new UnsupportedTypeException( + "BackupVersion type: " + backupVersion + " is not supported"); } - if (metas.size() == 0) { - logger.error("No meta found for snapshotdate: {}", DateUtil.formatyyyyMMddHHmm(latestDate)); - return result; + if (dateRange == null) { + throw new IllegalArgumentException("dateRange provided is null"); } - result.metaFileFound = true; - //Download meta.json from backup location and uncompress it. - List metaFileList = new ArrayList<>(); - try { - Path metaFileLocation = FileSystems.getDefault().getPath(config.getDataFileLocation(), "tmp_meta.json"); - bkpStatusFs.download(metas.get(0), new FileOutputStream(metaFileLocation.toFile())); - logger.info("Meta file successfully downloaded to localhost: {}", metaFileLocation.toString()); - - JSONParser jsonParser = new JSONParser(); - org.json.simple.JSONArray fileList = (org.json.simple.JSONArray) jsonParser.parse(new FileReader(metaFileLocation.toFile())); - for (int i = 0; i < fileList.size(); i++) - metaFileList.add(fileList.get(i).toString()); - - } catch (Exception e) { - logger.error("Error while fetching meta.json from path: {}", metas.get(0), e); - return result; - } + List result = new ArrayList<>(); - if (metaFileList.isEmpty() && s3Listing.isEmpty()) { - logger.info("Uncommon Scenario: Both meta file and backup filesystem listing is empty. Considering this as success"); - result.valid = true; - return result; + List metadata = + backupStatusMgr.getLatestBackupMetadata(backupVersion, dateRange); + if (metadata == null || metadata.isEmpty()) return result; + for (BackupMetadata backupMetadata : metadata) { + if (backupMetadata.getLastValidated() == null) { + BackupVerificationResult backupVerificationResult = + verifyBackup(metaProxy, backupMetadata); + if (logger.isDebugEnabled()) + logger.debug( + "BackupVerification: metadata: {}, result: {}", + backupMetadata, + backupVerificationResult); + if (backupVerificationResult.valid) { + backupMetadata.setLastValidated(new Date(DateUtil.getInstant().toEpochMilli())); + backupStatusMgr.update(backupMetadata); + result.add(backupVerificationResult); + } + } } + return result; + } - //Atleast meta file or s3 listing contains some file. - result.filesInS3Only = new ArrayList<>(s3Listing); - result.filesInS3Only.removeAll(metaFileList); - result.filesInMetaOnly = new ArrayList<>(metaFileList); - result.filesInMetaOnly.removeAll(s3Listing); - result.filesMatched = (ArrayList) CollectionUtils.intersection(metaFileList, s3Listing); - - //There could be a scenario that backupfilesystem has more files than meta file. e.g. some leftover objects - if (result.filesInMetaOnly.size() == 0) - result.valid = true; + /** returns the latest valid backup verification result if we have found one within the SLO * */ + public Optional getLatestVerfifiedBackupTime() { + return latestResult == null ? 
Optional.empty() : Optional.of(latestResult.snapshotInstant); + } - return result; + private BackupVerificationResult verifyBackup( + IMetaProxy metaProxy, BackupMetadata latestBackupMetaData) { + Path metadataLocation = Paths.get(latestBackupMetaData.getSnapshotLocation()); + metadataLocation = metadataLocation.subpath(1, metadataLocation.getNameCount()); + AbstractBackupPath abstractBackupPath = abstractBackupPathProvider.get(); + abstractBackupPath.parseRemote(metadataLocation.toString()); + return metaProxy.isMetaFileValid(abstractBackupPath); } } diff --git a/priam/src/main/java/com/netflix/priam/backup/BackupVerificationResult.java b/priam/src/main/java/com/netflix/priam/backup/BackupVerificationResult.java index 4f1ce4c97..1076eb66a 100644 --- a/priam/src/main/java/com/netflix/priam/backup/BackupVerificationResult.java +++ b/priam/src/main/java/com/netflix/priam/backup/BackupVerificationResult.java @@ -1,35 +1,37 @@ /** * Copyright 2017 Netflix, Inc. - *
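Reviewer note on the path handling in `BackupVerification` above: both the cached-result branch and the private `verifyBackup` trim the stored snapshot location with `subpath(1, getNameCount())` before re-parsing it as a remote path. The apparent assumption, which the call implies but the diff does not state outright, is that the stored location's first component is the bucket/container name, which remote listing takes separately. A quick illustration with a made-up location:

```java
import java.nio.file.Path;
import java.nio.file.Paths;

public class RemotePathTrim {
    public static void main(String[] args) {
        // Hypothetical stored snapshotLocation: "<bucket>/<remote key...>".
        Path stored = Paths.get(
                "my-backup-bucket/prod/us-east-1/cluster/1808575600/201903100000/META/meta.json");
        // Drop the leading bucket component to recover the key used for remote lookups.
        Path remoteKey = stored.subpath(1, stored.getNameCount());
        System.out.println(remoteKey);
        // -> prod/us-east-1/cluster/1808575600/201903100000/META/meta.json
    }
}
```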

- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and + * + *

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + *

http://www.apache.org/licenses/LICENSE-2.0 + * + *

Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.priam.backup; +import com.netflix.priam.utils.GsonJsonSerializer; +import java.time.Instant; +import java.util.ArrayList; import java.util.List; /** - * Created by aagrawal on 2/16/17. - * This class holds the result from BackupVerification. The default are all null and false. + * Created by aagrawal on 2/16/17. This class holds the result from BackupVerification. The default + * are all null and false. */ - public class BackupVerificationResult { - public boolean snapshotAvailable = false; public boolean valid = false; - public boolean metaFileFound = false; - public boolean backupFileListAvail = false; - public String selectedDate = null; - public String snapshotTime = null; - public List filesInMetaOnly = null; - public List filesInS3Only = null; - public List filesMatched = null; -} \ No newline at end of file + public String remotePath = null; + public Instant snapshotInstant = null; + public boolean manifestAvailable = false; + public List filesInMetaOnly = new ArrayList<>(); + public int filesMatched = 0; + + @Override + public String toString() { + return GsonJsonSerializer.getGson().toJson(this); + } +} diff --git a/priam/src/main/java/com/netflix/priam/backup/BackupVersion.java b/priam/src/main/java/com/netflix/priam/backup/BackupVersion.java new file mode 100644 index 000000000..84ea3ad36 --- /dev/null +++ b/priam/src/main/java/com/netflix/priam/backup/BackupVersion.java @@ -0,0 +1,92 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.priam.backup; + +import com.netflix.priam.scheduler.UnsupportedTypeException; +import java.util.HashMap; +import java.util.Map; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Enum to capture backup versions. Possible version are V1 and V2. Created by aagrawal on 1/29/19. + */ +public enum BackupVersion { + SNAPSHOT_BACKUP(1), + SNAPSHOT_META_SERVICE(2); + + private static final Logger logger = LoggerFactory.getLogger(BackupVersion.class); + + private final int backupVersion; + private static Map map = new HashMap<>(); + + static { + for (BackupVersion backupVersion : BackupVersion.values()) { + map.put(backupVersion.getBackupVersion(), backupVersion); + } + } + + BackupVersion(int backupVersion) { + this.backupVersion = backupVersion; + } + + public static BackupVersion lookup(int backupVersion, boolean acceptIllegalValue) + throws UnsupportedTypeException { + BackupVersion backupVersionResolved = map.get(backupVersion); + if (backupVersionResolved == null) { + String message = + String.format( + "%s is not a supported BackupVersion. 
Supported values are %s", + backupVersion, getSupportedValues()); + + if (acceptIllegalValue) { + message = + message + + ". Since acceptIllegalValue is set to True, returning NULL instead."; + logger.error(message); + return null; + } + + logger.error(message); + throw new UnsupportedTypeException(message); + } + return backupVersionResolved; + } + + private static String getSupportedValues() { + StringBuilder supportedValues = new StringBuilder(); + boolean first = true; + for (BackupVersion type : BackupVersion.values()) { + if (!first) { + supportedValues.append(","); + } + supportedValues.append(type); + first = false; + } + + return supportedValues.toString(); + } + + public static BackupVersion lookup(int backupVersion) throws UnsupportedTypeException { + return lookup(backupVersion, false); + } + + public int getBackupVersion() { + return backupVersion; + } +} diff --git a/priam/src/main/java/com/netflix/priam/backup/CommitLogBackup.java b/priam/src/main/java/com/netflix/priam/backup/CommitLogBackup.java index 2cacb6ff8..8de2b4dd0 100644 --- a/priam/src/main/java/com/netflix/priam/backup/CommitLogBackup.java +++ b/priam/src/main/java/com/netflix/priam/backup/CommitLogBackup.java @@ -1,16 +1,14 @@ /** * Copyright 2017 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and + * + *

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + *

http://www.apache.org/licenses/LICENSE-2.0 + * + *

Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.priam.backup; @@ -20,26 +18,23 @@ import com.google.inject.Provider; import com.google.inject.name.Named; import com.netflix.priam.backup.AbstractBackupPath.BackupFileType; -import com.netflix.priam.utils.RetryableCallable; +import com.netflix.priam.utils.DateUtil; +import java.io.File; +import java.util.List; import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.File; -import java.util.ArrayList; -import java.util.List; - - -//Providing this if we want to use it outside Quart +// Providing this if we want to use it outside Quartz public class CommitLogBackup { private static final Logger logger = LoggerFactory.getLogger(CommitLogBackup.class); private final Provider pathFactory; - static List observers = new ArrayList(); - private final List clRemotePaths = new ArrayList(); + private final List clRemotePaths = Lists.newArrayList(); private final IBackupFileSystem fs; @Inject - public CommitLogBackup(Provider pathFactory, @Named("backup") IBackupFileSystem fs) { + public CommitLogBackup( + Provider pathFactory, @Named("backup") IBackupFileSystem fs) { this.pathFactory = pathFactory; this.fs = fs; } @@ -54,72 +49,36 @@ public List upload(String archivedDir, final String snapshot File archivedCommitLogDir = new File(archivedDir); if (!archivedCommitLogDir.exists()) { - throw new IllegalArgumentException("The archived commitlog director does not exist: " + archivedDir); + throw new IllegalArgumentException( + "The archived commitlog directory does not exist: " + archivedDir); } if (logger.isDebugEnabled()) { logger.debug("Scanning for backup in: {}", archivedCommitLogDir.getAbsolutePath()); } - List bps = Lists.newArrayList(); + List bps = Lists.newArrayList(); for (final File file : archivedCommitLogDir.listFiles()) { logger.debug("Uploading commit log {} for backup", file.getCanonicalFile()); try { - AbstractBackupPath abp = (AbstractBackupPath) new RetryableCallable(3, 100L) { - public AbstractBackupPath retriableCall() throws Exception { + AbstractBackupPath bp = pathFactory.get(); + bp.parseLocal(file, BackupFileType.CL); - AbstractBackupPath bp = pathFactory.get(); - bp.parseLocal(file, BackupFileType.CL); - if (snapshotName != null) - bp.time = bp.parseDate(snapshotName); - upload(bp); - file.delete(); //TODO: should we put delete call here? We don't want to delete if the upload operation fails - return bp; - } - } - .call(); + if (snapshotName != null) bp.time = DateUtil.getDate(snapshotName); - if (abp != null) { - bps.add(abp); - } - addToRemotePath(abp.getRemotePath()); + fs.uploadAndDelete(bp, false /* async */); + bps.add(bp); + addToRemotePath(bp.getRemotePath()); } catch (Exception e) { - logger.error("Failed to upload local file {}. Ignoring to continue with rest of backup.", file, e); + logger.error( + "Failed to upload local file {}. 
Ignoring to continue with rest of backup.", + file, + e); } } return bps; } - private void upload(final AbstractBackupPath bp) - throws Exception { - new RetryableCallable() { - public Void retriableCall() - throws Exception { - fs.upload(bp, bp.localReader()); - return null; - } - } - .call(); - } - - public static void addObserver(IMessageObserver observer) { - observers.add(observer); - } - - public static void removeObserver(IMessageObserver observer) { - observers.remove(observer); - } - - public void notifyObservers() { - for (IMessageObserver observer : observers) - if (observer != null) { - logger.debug("Updating CommitLog observers now ..."); - observer.update(IMessageObserver.BACKUP_MESSAGE_TYPE.COMMITLOG, this.clRemotePaths); - } else { - logger.debug("Observer is Null, hence can not notify ..."); - } - } - - protected void addToRemotePath(String remotePath) { + private void addToRemotePath(String remotePath) { this.clRemotePaths.add(remotePath); } -} \ No newline at end of file +} diff --git a/priam/src/main/java/com/netflix/priam/backup/CommitLogBackupTask.java b/priam/src/main/java/com/netflix/priam/backup/CommitLogBackupTask.java index 36dff0a5e..e4e059b11 100644 --- a/priam/src/main/java/com/netflix/priam/backup/CommitLogBackupTask.java +++ b/priam/src/main/java/com/netflix/priam/backup/CommitLogBackupTask.java @@ -1,60 +1,46 @@ /** * Copyright 2017 Netflix, Inc. - *
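Reviewer note on the `upload` loop above: the inline `RetryableCallable` (retry plus explicit `file.delete()`) is replaced by a single `fs.uploadAndDelete(bp, false)` call, with a per-file catch so one bad segment no longer aborts the run. The skeleton of that loop, with a hypothetical `Uploader` standing in for `IBackupFileSystem`:

```java
import java.io.File;
import java.util.ArrayList;
import java.util.List;

public class ArchiveUploader {
    interface Uploader {
        // Stand-in for IBackupFileSystem.uploadAndDelete(path, async).
        void uploadAndDelete(File file) throws Exception;
    }

    // Upload every file in the directory; log and skip failures so a single bad
    // segment does not abort the rest of the commit-log backup.
    static List<File> uploadAll(File dir, Uploader uploader) {
        File[] files = dir.listFiles();
        if (files == null)
            throw new IllegalArgumentException("Not a readable directory: " + dir);
        List<File> uploaded = new ArrayList<>();
        for (File file : files) {
            try {
                uploader.uploadAndDelete(file);
                uploaded.add(file);
            } catch (Exception e) {
                System.err.println("Skipping " + file.getName() + ": " + e.getMessage());
            }
        }
        return uploaded;
    }
}
```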

- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and + * + *

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + *

http://www.apache.org/licenses/LICENSE-2.0 + * + *

Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.priam.backup; - import com.google.inject.Inject; -import com.google.inject.Provider; import com.google.inject.Singleton; import com.netflix.priam.config.IConfiguration; -import com.netflix.priam.backup.IMessageObserver.BACKUP_MESSAGE_TYPE; import com.netflix.priam.scheduler.SimpleTimer; import com.netflix.priam.scheduler.TaskTimer; +import java.io.File; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.File; -import java.util.ArrayList; -import java.util.List; - - -//Provide this to be run as a Quart job +// Provide this to be run as a Quartz job @Singleton public class CommitLogBackupTask extends AbstractBackup { - public static String JOBNAME = "CommitLogBackup"; + public static final String JOBNAME = "CommitLogBackup"; private static final Logger logger = LoggerFactory.getLogger(CommitLogBackupTask.class); - private final List clRemotePaths = new ArrayList(); - private static List observers = new ArrayList(); private final CommitLogBackup clBackup; - @Inject - public CommitLogBackupTask(IConfiguration config, Provider pathFactory, - CommitLogBackup clBackup, IFileSystemContext backupFileSystemCtx) { - super(config, backupFileSystemCtx, pathFactory); + public CommitLogBackupTask(IConfiguration config, CommitLogBackup clBackup) { + super(config); this.clBackup = clBackup; } - @Override public void execute() throws Exception { try { logger.debug("Checking for any archived commitlogs"); - //double-check the permission + // double-check the permission if (config.isBackingUpCommitLogs()) clBackup.upload(config.getCommitLogBackupRestoreFromDirs(), null); } catch (Exception e) { @@ -62,42 +48,19 @@ public void execute() throws Exception { } } - @Override public String getName() { return JOBNAME; } public static TaskTimer getTimer(IConfiguration config) { - return new SimpleTimer(JOBNAME, 60L * 1000); //every 1 min - } - - - public static void addObserver(IMessageObserver observer) { - observers.add(observer); - } - - public static void removeObserver(IMessageObserver observer) { - observers.remove(observer); - } - - public void notifyObservers() { - for (IMessageObserver observer : observers) { - if (observer != null) { - logger.debug("Updating CL observers now ..."); - observer.update(BACKUP_MESSAGE_TYPE.COMMITLOG, clRemotePaths); - } else - logger.info("Observer is Null, hence can not notify ..."); - } - } - - @Override - protected void processColumnFamily(String keyspace, String columnFamily, File backupDir) throws Exception { - //Do nothing. + if (config.isBackingUpCommitLogs()) + return new SimpleTimer(JOBNAME, 60L * 1000); // every 1 min + else return null; } @Override - protected void addToRemotePath(String remotePath) { - clRemotePaths.add(remotePath); + protected void processColumnFamily(File columnFamilyDirectory) { + // Do nothing. 
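+ // Commit log backups operate on whole archived commit log files, not per-column-family data, so this AbstractBackup hook is a no-op.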
} } diff --git a/priam/src/main/java/com/netflix/priam/backup/DirectorySize.java b/priam/src/main/java/com/netflix/priam/backup/DirectorySize.java new file mode 100644 index 000000000..1dc44ec35 --- /dev/null +++ b/priam/src/main/java/com/netflix/priam/backup/DirectorySize.java @@ -0,0 +1,10 @@ +package com.netflix.priam.backup; + +import com.google.inject.ImplementedBy; + +/** estimates the number of bytes remaining to upload in a snapshot */ +@ImplementedBy(SnapshotDirectorySize.class) +public interface DirectorySize { + /** return the total bytes of all snapshot files south of location in the filesystem */ + long getBytes(String location); +} diff --git a/priam/src/main/java/com/netflix/priam/backup/DynamicRateLimiter.java b/priam/src/main/java/com/netflix/priam/backup/DynamicRateLimiter.java new file mode 100644 index 000000000..dd1417d7f --- /dev/null +++ b/priam/src/main/java/com/netflix/priam/backup/DynamicRateLimiter.java @@ -0,0 +1,9 @@ +package com.netflix.priam.backup; + +import com.google.inject.ImplementedBy; +import java.time.Instant; + +@ImplementedBy(BackupDynamicRateLimiter.class) +public interface DynamicRateLimiter { + void acquire(AbstractBackupPath dir, Instant target, int tokens); +} diff --git a/priam/src/main/java/com/netflix/priam/backup/FileSnapshotStatusMgr.java b/priam/src/main/java/com/netflix/priam/backup/FileSnapshotStatusMgr.java index 721c66fe3..e59c5880d 100644 --- a/priam/src/main/java/com/netflix/priam/backup/FileSnapshotStatusMgr.java +++ b/priam/src/main/java/com/netflix/priam/backup/FileSnapshotStatusMgr.java @@ -21,79 +21,100 @@ import com.netflix.priam.config.IConfiguration; import com.netflix.priam.health.InstanceState; import com.netflix.priam.utils.MaxSizeHashMap; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.inject.Singleton; import java.io.*; import java.util.LinkedList; import java.util.Map; +import javax.inject.Singleton; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * Default implementation for {@link IBackupStatusMgr}. This will save the snapshot status in local file. - * Created by aagrawal on 7/11/17. + * Default implementation for {@link IBackupStatusMgr}. This will save the snapshot status in local + * file. Created by aagrawal on 7/11/17. */ @Singleton public class FileSnapshotStatusMgr extends BackupStatusMgr { private static final Logger logger = LoggerFactory.getLogger(FileSnapshotStatusMgr.class); private static final int IN_MEMORY_SNAPSHOT_CAPACITY = 60; - private String filename; + private final String filename; /** * Constructor to initialize the file based snapshot status manager. * * @param config {@link IConfiguration} of priam to find where file should be saved/read from. - * @param instanceState Status of the instance encapsulating health and other metadata of Priam and Cassandra. + * @param instanceState Status of the instance encapsulating health and other metadata of Priam + * and Cassandra. */ @Inject public FileSnapshotStatusMgr(IConfiguration config, InstanceState instanceState) { - super(IN_MEMORY_SNAPSHOT_CAPACITY, instanceState); //Fetch capacity from properties, if required. + super( + IN_MEMORY_SNAPSHOT_CAPACITY, + instanceState); // Fetch capacity from properties, if required. this.filename = config.getBackupStatusFileLoc(); init(); } private void init() { - //Retrieve entire file and re-populate the list. + // Retrieve entire file and re-populate the list. 
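+ // The status file holds the serialized in-memory snapshot map written out by save(); if it is missing we start with an empty map.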
File snapshotFile = new File(filename); if (!snapshotFile.exists()) { - logger.info("Snapshot status file do not exist on system. Bypassing initilization phase."); + snapshotFile.getParentFile().mkdirs(); + logger.info( + "Snapshot status file does not exist on system. Bypassing initialization phase."); + backupMetadataMap = new MaxSizeHashMap<>(capacity); return; } - try (final ObjectInputStream inputStream = new ObjectInputStream(new FileInputStream(snapshotFile))) { + try (final ObjectInputStream inputStream = + new ObjectInputStream(new FileInputStream(snapshotFile))) { backupMetadataMap = (Map>) inputStream.readObject(); - logger.info("Snapshot status of size {} fetched successfully from {}", backupMetadataMap.size(), filename); + logger.info( + "Snapshot status of size {} fetched successfully from {}", + backupMetadataMap.size(), + filename); } catch (IOException e) { - logger.error("Error while trying to fetch snapshot status from {}. Error: {}. If this is first time after upgrading Priam, ignore this.", filename, e.getLocalizedMessage()); + logger.error( + "Error while trying to fetch snapshot status from {}. Error: {}. If this is the first time after upgrading Priam, ignore this.", + filename, + e.getLocalizedMessage()); e.printStackTrace(); } catch (Exception e) { - logger.error("Error while trying to fetch snapshot status from {}. Error: {}.", filename, e.getLocalizedMessage()); + logger.error( + "Error while trying to fetch snapshot status from {}. Error: {}.", + filename, + e.getLocalizedMessage()); e.printStackTrace(); } - if (backupMetadataMap == null) - backupMetadataMap = new MaxSizeHashMap<>(capacity); + if (backupMetadataMap == null) backupMetadataMap = new MaxSizeHashMap<>(capacity); } @Override public void save(BackupMetadata backupMetadata) { File snapshotFile = new File(filename); - if (!snapshotFile.exists()) - snapshotFile.mkdirs(); + if (!snapshotFile.exists()) snapshotFile.getParentFile().mkdirs(); - //Will save entire list to file. + // Will save entire list to file. try (final ObjectOutputStream out = + new ObjectOutputStream(new FileOutputStream(filename))) { out.writeObject(backupMetadataMap); out.flush(); - logger.info("Snapshot status of size {} is saved to {}", backupMetadataMap.size(), filename); + logger.info( + "Snapshot status of size {} is saved to {}", + backupMetadataMap.size(), + filename); } catch (IOException e) { - logger.error("Error while trying to persist snapshot status to {}. Error: {}", filename, e.getLocalizedMessage()); + logger.error( + "Error while trying to persist snapshot status to {}. Error: {}", + filename, + e.getLocalizedMessage()); } } @Override public LinkedList fetch(String snapshotDate) { - //No need to fetch from local machine as it was read once at start. No point reading again and again. + // No need to fetch from local machine as it was read once at start. No point reading again + // and again. 
return backupMetadataMap.get(snapshotDate); } } diff --git a/priam/src/main/java/com/netflix/priam/backup/IBackupFileSystem.java b/priam/src/main/java/com/netflix/priam/backup/IBackupFileSystem.java index 0036d6736..e33ff3bfc 100644 --- a/priam/src/main/java/com/netflix/priam/backup/IBackupFileSystem.java +++ b/priam/src/main/java/com/netflix/priam/backup/IBackupFileSystem.java @@ -16,59 +16,174 @@ */ package com.netflix.priam.backup; -import java.io.InputStream; -import java.io.OutputStream; +import com.google.common.util.concurrent.ListenableFuture; +import java.io.FileNotFoundException; +import java.nio.file.Path; +import java.time.Instant; import java.util.Date; import java.util.Iterator; +import java.util.List; +import java.util.concurrent.Future; +import java.util.concurrent.RejectedExecutionException; -/** - * Interface representing a backup storage as a file system - */ +/** Interface representing a backup storage as a file system */ public interface IBackupFileSystem { /** - * Write the contents of the specified remote path to the output stream and - * close + * Download the file denoted by remotePath to the local file system denoted by local path. + * + * @param path Backup path representing a local and remote file pair + * @param retry No. of times to retry to download a file from remote file system. If <1, it + * will try to download file exactly once. + * @throws BackupRestoreException if file is not available, downloadable or any other error from + * remote file system. */ - void download(AbstractBackupPath path, OutputStream os) throws BackupRestoreException; + void downloadFile(AbstractBackupPath path, String suffix, int retry) + throws BackupRestoreException; /** - * Write the contents of the specified remote path to the output stream and close. - * filePath denotes the diskPath of the downloaded file + * Download the file denoted by remotePath in an async fashion to the local file system denoted + * by local path. + * + * @param path Backup path representing a local and remote file pair + * @param retry No. of times to retry to download a file from remote file system. If <1, it + * will try to download file exactly once. + * @return The future of the async job to monitor the progress of the job. + * @throws BackupRestoreException if file is not available, downloadable or any other error from + * remote file system. + * @throws RejectedExecutionException if the queue is full and TIMEOUT is reached while trying + * to add the work to the queue. */ - void download(AbstractBackupPath path, OutputStream os, String filePath) throws BackupRestoreException; + Future asyncDownloadFile(final AbstractBackupPath path, final int retry) + throws BackupRestoreException, RejectedExecutionException; + + /** Overload that uploads as fast as possible without any custom throttling */ + default void uploadAndDelete(AbstractBackupPath path, boolean async) + throws FileNotFoundException, BackupRestoreException { + uploadAndDelete(path, Instant.EPOCH, async); + } /** - * Upload/Backup to the specified location with contents from the input - * stream. Closes the InputStream after its done. + * Upload the local file to its remote counterpart in an optionally async fashion. Both + * locations are embedded within the path parameter. De-duping of the file to upload will always + * be done by comparing the files-in-progress to be uploaded. This may result in this particular + * request not to be executed, e.g. 
if any other thread has given the same file to upload and + * that file is in the internal queue. Note that de-duping is best effort and is not always + * guaranteed as we try to avoid a lock on read/write of the files-in-progress. Once uploaded, + * files are deleted. Uploads are retried 10 times. + * + * @param path AbstractBackupPath to be used to send backup notifications only. + * @param target The target time of completion of all files in the upload. + * @param async boolean to determine whether the call should block or return immediately and + * upload asynchronously + * @return The future of the async job to monitor the progress of the job. This will be null if + * file was de-duped for upload. + * @throws BackupRestoreException in case of failure to upload for any reason including file not + * readable or remote file system errors. + * @throws FileNotFoundException If a file as denoted by localPath is not available or is a + * directory. + * @throws RejectedExecutionException if the queue is full and TIMEOUT is reached while trying + * to add the work to the queue. */ - void upload(AbstractBackupPath path, InputStream in) throws BackupRestoreException; + ListenableFuture uploadAndDelete( + final AbstractBackupPath path, Instant target, boolean async) + throws FileNotFoundException, RejectedExecutionException, BackupRestoreException; /** - * List all files in the backup location for the specified time range. + * Get the shard where this object should be stored. For local file system this should be empty + * or null. For S3, it would be the location of the bucket. + * + * @return the location of the shard. */ - Iterator list(String path, Date start, Date till); + default String getShard() { + return ""; + } /** - * Get a list of prefixes for the cluster available in backup for the specified date + * Get the prefix path for the backup file system. This will be either the location of the + * remote file system for backup or the location from where we should restore. + * + * @return prefix path to the backup file system. */ + Path getPrefix(); + + /** + * List all files in the backup location for the specified time range. + * + * @param path This is used as the `prefix` for listing files in the filesystem. All the files + * that start with this prefix will be returned. + * @param start Start date of the file upload. + * @param till End date of the file upload. + * @return Iterator of the AbstractBackupPath matching the criteria. + */ + Iterator list(String path, Date start, Date till); + + /** Get a list of prefixes for the cluster available in backup for the specified date */ Iterator listPrefixes(Date date); /** - * Runs cleanup or set retention + * List all the files with the given prefix, delimiter, and marker. Files should be returned + * ordered by last modified time descending. This should never return null. + * + * @param prefix Common prefix of the elements to search in the backup file system. + * @param delimiter All the objects will end with this delimiter. + * @param marker Start the fetch with this as the first object. + * @return the iterator on the backup file system containing path of the files. */ + Iterator listFileSystem(String prefix, String delimiter, String marker); + + /** Runs cleanup or set retention */ void cleanup(); + /** Give the file system a chance to terminate any thread pools, etc. 
*/ + void shutdown(); + /** - * Get number of active upload or downloads + * Get the size of the remote object + * + * @param remotePath Location of the object on the remote file system. + * @return size of the object on the remote filesystem. + * @throws BackupRestoreException in case of failure to read object denoted by remotePath or any + * other error. */ - int getActivecount(); + long getFileSize(String remotePath) throws BackupRestoreException; /** - * Give the file system a chance to terminate any thread pools, etc. + * Checks if the file denoted by remotePath exists on the remote file system. It does not need + * to check whether the object was completely uploaded to the remote file system. + * + * @param remotePath location on the remote file system. + * @return boolean value indicating presence of the file on remote file system. */ - void shutdown(); + default boolean checkObjectExists(Path remotePath) { + return false; + } + + /** + * Delete list of remote files from the remote file system. It should throw exception if there + * is anything wrong in processing the request. If the remotePaths passed do not exist, then it + * should just keep quiet. + * + * @param remotePaths list of files on remote file system to be deleted. These paths may or may + * not exist. + * @throws BackupRestoreException in case the remote file system is not able to process the request + * or is unreachable. + */ + void deleteRemoteFiles(List remotePaths) throws BackupRestoreException; - long getBytesUploaded(); + /** + * Get the number of tasks enqueued in the filesystem for upload. + * + * @return the total no. of tasks to be executed. + */ + int getUploadTasksQueued(); + + /** + * Get the number of tasks enqueued in the filesystem for download. + * + * @return the total no. of tasks to be executed. + */ + int getDownloadTasksQueued(); - long getAWSSlowDownExceptionCounter(); + /** Clear the cache for the backup file system, if any. */ + void clearCache(); } diff --git a/priam/src/main/java/com/netflix/priam/backup/IBackupStatusMgr.java b/priam/src/main/java/com/netflix/priam/backup/IBackupStatusMgr.java index 97054fe35..95b69b007 100644 --- a/priam/src/main/java/com/netflix/priam/backup/IBackupStatusMgr.java +++ b/priam/src/main/java/com/netflix/priam/backup/IBackupStatusMgr.java @@ -1,31 +1,28 @@ /** * Copyright 2017 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and + * + *

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + *

http://www.apache.org/licenses/LICENSE-2.0 + * + *

Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.priam.backup; import com.google.inject.ImplementedBy; - +import com.netflix.priam.utils.DateUtil; import java.util.Date; import java.util.LinkedList; import java.util.List; import java.util.Map; /** - * This will store the status of snapshots as they start, fail or finish. By default they will save the snapshot - * status of last 60 days on instance. - * Created by aagrawal on 1/30/17. + * This will store the status of snapshots as they start, fail or finish. By default they will save + * the snapshot status of last 60 days on instance. Created by aagrawal on 1/30/17. */ @ImplementedBy(FileSnapshotStatusMgr.class) public interface IBackupStatusMgr { @@ -40,20 +37,23 @@ public interface IBackupStatusMgr { /** * Return the list of snapshot executed on provided day or null if not present. * - * @param snapshotDate date on which snapshot was started in the format of yyyyMMdd or yyyyMMddHHmm. + * @param snapshotDate date on which snapshot was started in the format of yyyyMMdd or + * yyyyMMddHHmm. * @return List of snapshots started on that day in descending order of snapshot start time. */ List locate(String snapshotDate); /** - * Save the status of snapshot BackupMetadata which started in-memory and other implementations, if any. + * Save the status of snapshot BackupMetadata which started in-memory and other implementations, + * if any. * * @param backupMetadata backupmetadata that started */ void start(BackupMetadata backupMetadata); /** - * Save the status of successfully finished snapshot BackupMetadata in-memory and other implementations, if any. + * Save the status of successfully finished snapshot BackupMetadata in-memory and other + * implementations, if any. * * @param backupMetadata backupmetadata that finished successfully */ @@ -66,6 +66,13 @@ public interface IBackupStatusMgr { */ void failed(BackupMetadata backupMetadata); + /** + * Update the backup information of backupmetadata in-memory and other implementations, if any. + * + * @param backupMetadata backupmetadata to be updated. + */ + void update(BackupMetadata backupMetadata); + /** * Get the capacity of in-memory status map holding the snapshot status. * @@ -76,8 +83,21 @@ public interface IBackupStatusMgr { /** * Get the entire map of snapshot status hold in-memory * - * @return The map of snapshot status in-memory in format. - * Key is snapshot day in format of yyyyMMdd (start date of snapshot) with a list of snapshots in the descending order of snapshot start time. + * @return The map of snapshot status in-memory in format. Key is snapshot day in format of + * yyyyMMdd (start date of snapshot) with a list of snapshots in the descending order of + * snapshot start time. */ Map> getAllSnapshotStatus(); + + /** + * Get the list of backup metadata which are finished and have started in the daterange + * provided, in reverse chronological order of start date. + * + * @param backupVersion backup version of the backups to search. + * @param dateRange time period in which snapshot should have started. Finish time may be after + * the endTime in input. 
+ * @return list of backup metadata which satisfies the input criteria + */ + List getLatestBackupMetadata( + BackupVersion backupVersion, DateUtil.DateRange dateRange); } diff --git a/priam/src/main/java/com/netflix/priam/backup/IFileSystemContext.java b/priam/src/main/java/com/netflix/priam/backup/IFileSystemContext.java index 8c9d9f961..380a98103 100755 --- a/priam/src/main/java/com/netflix/priam/backup/IFileSystemContext.java +++ b/priam/src/main/java/com/netflix/priam/backup/IFileSystemContext.java @@ -1,16 +1,14 @@ /** * Copyright 2017 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and + * + *

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + *

http://www.apache.org/licenses/LICENSE-2.0 + * + *

Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.priam.backup; diff --git a/priam/src/main/java/com/netflix/priam/backup/IIncrementalBackup.java b/priam/src/main/java/com/netflix/priam/backup/IIncrementalBackup.java deleted file mode 100644 index 3dd48caf6..000000000 --- a/priam/src/main/java/com/netflix/priam/backup/IIncrementalBackup.java +++ /dev/null @@ -1,30 +0,0 @@ -/** - * Copyright 2017 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.netflix.priam.backup; - -public interface IIncrementalBackup { - - long INCREMENTAL_INTERVAL_IN_MILLISECS = 10L * 1000; - - /* - * @return the number of files pending to be uploaded. The semantic depends on whether the implementation - * is synchronous or asynchronous. - */ - long getNumPendingFiles(); - - String getJobName(); - -} \ No newline at end of file diff --git a/priam/src/main/java/com/netflix/priam/backup/IMessageObserver.java b/priam/src/main/java/com/netflix/priam/backup/IMessageObserver.java deleted file mode 100644 index 62da1a744..000000000 --- a/priam/src/main/java/com/netflix/priam/backup/IMessageObserver.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright 2013 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -package com.netflix.priam.backup; - -import java.util.List; - - -public interface IMessageObserver { - - enum BACKUP_MESSAGE_TYPE {SNAPSHOT, INCREMENTAL, COMMITLOG, META} - - enum RESTORE_MESSAGE_TYPE {SNAPSHOT, INCREMENTAL, COMMITLOG, META} - - enum RESTORE_MESSAGE_STATUS {UPLOADED, DOWNLOADED, STREAMED} - - void update(BACKUP_MESSAGE_TYPE bkpMsgType, List remotePathNames); - - void update(RESTORE_MESSAGE_TYPE rstMsgType, List remotePathNames, RESTORE_MESSAGE_STATUS rstMsgStatus); - - void update(RESTORE_MESSAGE_TYPE rstMsgType, String remotePath, String fileDiskPath, RESTORE_MESSAGE_STATUS rstMsgStatus); - -} diff --git a/priam/src/main/java/com/netflix/priam/backup/IncrementalBackup.java b/priam/src/main/java/com/netflix/priam/backup/IncrementalBackup.java index 388803220..43d23bf66 100644 --- a/priam/src/main/java/com/netflix/priam/backup/IncrementalBackup.java +++ b/priam/src/main/java/com/netflix/priam/backup/IncrementalBackup.java @@ -16,110 +16,131 @@ */ package com.netflix.priam.backup; +import com.google.common.collect.ImmutableList; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.MoreExecutors; import com.google.inject.Inject; -import com.google.inject.Provider; import com.google.inject.Singleton; -import com.netflix.priam.config.IConfiguration; import com.netflix.priam.backup.AbstractBackupPath.BackupFileType; -import com.netflix.priam.backup.IMessageObserver.BACKUP_MESSAGE_TYPE; +import com.netflix.priam.backupv2.SnapshotMetaTask; +import com.netflix.priam.config.IBackupRestoreConfig; +import com.netflix.priam.config.IConfiguration; import com.netflix.priam.scheduler.SimpleTimer; import com.netflix.priam.scheduler.TaskTimer; +import java.io.File; +import java.nio.file.Path; +import java.util.Set; +import org.apache.commons.io.FileUtils; import org.slf4j.Logger; import 
org.slf4j.LoggerFactory; -import java.io.File; -import java.util.ArrayList; -import java.util.List; - /* * Incremental/SSTable backup */ @Singleton -public class IncrementalBackup extends AbstractBackup implements IIncrementalBackup { +public class IncrementalBackup extends AbstractBackup { private static final Logger logger = LoggerFactory.getLogger(IncrementalBackup.class); public static final String JOBNAME = "IncrementalBackup"; - private final List incrementalRemotePaths = new ArrayList(); - private IncrementalMetaData metaData; - private BackupRestoreUtil backupRestoreUtil; - private static List observers = new ArrayList(); + private final BackupRestoreUtil backupRestoreUtil; + private final IBackupRestoreConfig backupRestoreConfig; + private final BackupHelper backupHelper; @Inject - public IncrementalBackup(IConfiguration config, Provider pathFactory, IFileSystemContext backupFileSystemCtx - , IncrementalMetaData metaData) { - super(config, backupFileSystemCtx, pathFactory); - this.metaData = metaData; //a means to upload audit trail (via meta_cf_yyyymmddhhmm.json) of files successfully uploaded) - backupRestoreUtil = new BackupRestoreUtil(config.getIncrementalKeyspaceFilters(), config.getIncrementalCFFilter()); + public IncrementalBackup( + IConfiguration config, + IBackupRestoreConfig backupRestoreConfig, + BackupHelper backupHelper) { + super(config); + // a means to upload audit trail (via meta_cf_yyyymmddhhmm.json) of files successfully + // uploaded) + this.backupRestoreConfig = backupRestoreConfig; + backupRestoreUtil = + new BackupRestoreUtil( + config.getIncrementalIncludeCFList(), config.getIncrementalExcludeCFList()); + this.backupHelper = backupHelper; } @Override public void execute() throws Exception { - //Clearing remotePath List - incrementalRemotePaths.clear(); + // Clearing remotePath List initiateBackup(INCREMENTAL_BACKUP_FOLDER, backupRestoreUtil); - if (incrementalRemotePaths.size() > 0) { - notifyObservers(); - } } - - /** - * Run every 10 Sec - */ - public static TaskTimer getTimer() { - return new SimpleTimer(JOBNAME, 10L * 1000); + /** Run every 10 Sec */ + public static TaskTimer getTimer( + IConfiguration config, IBackupRestoreConfig backupRestoreConfig) { + if (IncrementalBackup.isEnabled(config, backupRestoreConfig)) + return new SimpleTimer(JOBNAME, 10L * 1000); + return null; } - @Override - public String getName() { - return JOBNAME; - } - - public static void addObserver(IMessageObserver observer) { - observers.add(observer); - } - - public static void removeObserver(IMessageObserver observer) { - observers.remove(observer); - } - - private void notifyObservers() { - for (IMessageObserver observer : observers) { - if (observer != null) { - logger.debug("Updating incremental observers now ..."); - observer.update(BACKUP_MESSAGE_TYPE.INCREMENTAL, incrementalRemotePaths); - } else - logger.info("Observer is Null, hence can not notify ..."); + private static void cleanOldBackups(IConfiguration configuration) throws Exception { + Set backupPaths = + AbstractBackup.getBackupDirectories(configuration, INCREMENTAL_BACKUP_FOLDER); + for (Path backupDirPath : backupPaths) { + FileUtils.cleanDirectory(backupDirPath.toFile()); } } - @Override - protected void processColumnFamily(String keyspace, String columnFamily, File backupDir) throws Exception { - List uploadedFiles = upload(backupDir, BackupFileType.SST); - - if (!uploadedFiles.isEmpty()) { - String incrementalUploadTime = AbstractBackupPath.formatDate(uploadedFiles.get(0).getTime()); //format of 
yyyymmddhhmm (e.g. 201505060901) - String metaFileName = "meta_" + backupDir.getParent() + "_" + incrementalUploadTime; - logger.info("Uploading meta file for incremental backup: {}", metaFileName); - this.metaData.setMetaFileName(metaFileName); - this.metaData.set(uploadedFiles, incrementalUploadTime); - logger.info("Uploaded meta file for incremental backup: {}", metaFileName); + public static boolean isEnabled( + IConfiguration configuration, IBackupRestoreConfig backupRestoreConfig) { + boolean enabled = false; + try { + // Once backup 1.0 is gone, we should not check for enableV2Backups. + enabled = + (configuration.isIncrementalBackupEnabled() + && (SnapshotBackup.isBackupEnabled(configuration) + || (backupRestoreConfig.enableV2Backups() + && SnapshotMetaTask.isBackupEnabled( + backupRestoreConfig)))); + logger.info("Incremental backups are enabled: {}", enabled); + + if (!enabled) { + // Clean up the incremental backup folder. + cleanOldBackups(configuration); + } + } catch (Exception e) { + logger.error( + "Error while trying to find if incremental backup is enabled: " + + e.getMessage()); } - + return enabled; } @Override - protected void addToRemotePath(String remotePath) { - incrementalRemotePaths.add(remotePath); + public String getName() { + return JOBNAME; } @Override - public long getNumPendingFiles() { - throw new UnsupportedOperationException(); + protected void processColumnFamily(File backupDir) throws Exception { + BackupFileType fileType = + backupRestoreConfig.enableV2Backups() ? BackupFileType.SST_V2 : BackupFileType.SST; + + // upload SSTables and components + ImmutableList> futures = + backupHelper.uploadAndDeleteAllFiles( + backupDir, fileType, config.enableAsyncIncremental()); + Futures.whenAllComplete(futures).call(() -> null, MoreExecutors.directExecutor()); + + // Next, upload secondary indexes + fileType = BackupFileType.SECONDARY_INDEX_V2; + for (File directory : getSecondaryIndexDirectories(backupDir)) { + futures = + backupHelper.uploadAndDeleteAllFiles( + directory, fileType, config.enableAsyncIncremental()); + if (futures.stream().allMatch(ListenableFuture::isDone)) { + deleteIfEmpty(directory); + } else { + Futures.whenAllComplete(futures) + .call(() -> deleteIfEmpty(directory), MoreExecutors.directExecutor()); + } + } } - @Override - public String getJobName() { - return JOBNAME; + private Void deleteIfEmpty(File dir) { + if (FileUtils.sizeOfDirectory(dir) == 0) FileUtils.deleteQuietly(dir); + return null; } - -} \ No newline at end of file +} diff --git a/priam/src/main/java/com/netflix/priam/backup/IncrementalMetaData.java b/priam/src/main/java/com/netflix/priam/backup/IncrementalMetaData.java deleted file mode 100644 index 2d301062c..000000000 --- a/priam/src/main/java/com/netflix/priam/backup/IncrementalMetaData.java +++ /dev/null @@ -1,67 +0,0 @@ -/** - * Copyright 2017 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.netflix.priam.backup; - -import com.google.inject.Inject; -import com.google.inject.Provider; -import com.netflix.priam.config.IConfiguration; -import org.apache.commons.io.FileUtils; - -import java.io.File; -import java.io.IOException; - -public class IncrementalMetaData extends MetaData { - - private String metaFileName = null; //format meta_cf_time (e.g. - - @Inject - public IncrementalMetaData(IConfiguration config, Provider pathFactory, IFileSystemContext backupFileSystemCtx) { - super(pathFactory, backupFileSystemCtx, config); - } - - public void setMetaFileName(String name) { - this.metaFileName = name; - } - - @Override - public File createTmpMetaFile() throws IOException { - File metafile = null, destFile = null; - - if (this.metaFileName == null) { - - metafile = File.createTempFile("incrementalMeta", ".json"); - destFile = new File(metafile.getParent(), "incrementalMeta.json"); - - } else { - metafile = File.createTempFile(this.metaFileName, ".json"); - destFile = new File(metafile.getParent(), this.metaFileName + ".json"); - } - - if (destFile.exists()) - destFile.delete(); - - try { - - FileUtils.moveFile(metafile, destFile); - - } finally { - if (metafile != null && metafile.exists()) { //clean up resource - FileUtils.deleteQuietly(metafile); - } - } - return destFile; - } -} diff --git a/priam/src/main/java/com/netflix/priam/backup/MetaData.java b/priam/src/main/java/com/netflix/priam/backup/MetaData.java index 17f172f5b..60dffbf62 100644 --- a/priam/src/main/java/com/netflix/priam/backup/MetaData.java +++ b/priam/src/main/java/com/netflix/priam/backup/MetaData.java @@ -16,160 +16,93 @@ */ package com.netflix.priam.backup; -import com.google.common.collect.Lists; import com.google.inject.Inject; import com.google.inject.Provider; -import com.netflix.priam.config.IConfiguration; import com.netflix.priam.backup.AbstractBackupPath.BackupFileType; -import com.netflix.priam.backup.IMessageObserver.BACKUP_MESSAGE_TYPE; -import com.netflix.priam.utils.RetryableCallable; +import com.netflix.priam.config.IConfiguration; +import com.netflix.priam.utils.DateUtil; +import java.io.File; +import java.io.FileWriter; +import java.io.IOException; +import java.text.ParseException; +import java.util.ArrayList; +import java.util.List; import org.apache.commons.io.FileUtils; import org.json.simple.JSONArray; -import org.json.simple.parser.JSONParser; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.*; -import java.text.ParseException; -import java.util.ArrayList; -import java.util.List; - /** - * Class to create a meta data file with a list of snapshot files. Also list the - * contents of a meta data file. + * Class to create a meta data file with a list of snapshot files. Also list the contents of a meta + * data file. 
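+ * The meta file (meta.json) records the remote path of every file uploaded as part of the snapshot.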
*/ public class MetaData { private static final Logger logger = LoggerFactory.getLogger(MetaData.class); private final Provider pathFactory; - private static List observers = new ArrayList(); - private final List metaRemotePaths = new ArrayList(); + private final List metaRemotePaths = new ArrayList<>(); private final IBackupFileSystem fs; @Inject - public MetaData(Provider pathFactory, IFileSystemContext backupFileSystemCtx, IConfiguration config) + public MetaData( + Provider pathFactory, + IFileSystemContext backupFileSystemCtx, + IConfiguration config) { - { this.pathFactory = pathFactory; this.fs = backupFileSystemCtx.getFileStrategy(config); } - public AbstractBackupPath set(List bps, String snapshotName) throws Exception { + public AbstractBackupPath set(List bps, String snapshotName) + throws Exception { File metafile = createTmpMetaFile(); - try(FileWriter fr = new FileWriter(metafile)) { + try (FileWriter fr = new FileWriter(metafile)) { JSONArray jsonObj = new JSONArray(); - for (AbstractBackupPath filePath : bps) - jsonObj.add(filePath.getRemotePath()); + for (AbstractBackupPath filePath : bps) jsonObj.add(filePath.getRemotePath()); fr.write(jsonObj.toJSONString()); } AbstractBackupPath backupfile = decorateMetaJson(metafile, snapshotName); - try { - upload(backupfile); - - addToRemotePath(backupfile.getRemotePath()); - if (metaRemotePaths.size() > 0) { - notifyObservers(); - } - } finally { - FileUtils.deleteQuietly(metafile); - } - + fs.uploadAndDelete(backupfile, false /* async */); + addToRemotePath(backupfile.getRemotePath()); return backupfile; } /* From the meta.json to be created, populate its meta data for the backup file. */ - public AbstractBackupPath decorateMetaJson(File metafile, String snapshotName) throws ParseException { + public AbstractBackupPath decorateMetaJson(File metafile, String snapshotName) + throws ParseException { AbstractBackupPath backupfile = pathFactory.get(); backupfile.parseLocal(metafile, BackupFileType.META); - backupfile.time = backupfile.parseDate(snapshotName); + backupfile.setTime(DateUtil.getDate(snapshotName)); return backupfile; } - /* - * Determines the existence of the backup meta file. This meta file could be snapshot (meta.json) or + * Determines the existence of the backup meta file. This meta file could be snapshot (meta.json) or * incrementals (meta_keyspace_cf..json). - * + * * @param backup meta file to search * @return true if backup meta file exist, false otherwise. 
*/ public Boolean doesExist(final AbstractBackupPath meta) { try { - new RetryableCallable() { - @Override - public Void retriableCall() throws Exception { - fs.download(meta, new FileOutputStream(meta.newRestoreFile())); //download actual file to disk - return null; - } - }.call(); - + fs.downloadFile(meta, "" /* suffix */, 5 /* retries */); } catch (Exception e) { logger.error("Error downloading the Meta data try with a different date...", e); } return meta.newRestoreFile().exists(); - - } - - private void upload(final AbstractBackupPath bp) throws Exception { - new RetryableCallable() { - @Override - public Void retriableCall() throws Exception { - fs.upload(bp, bp.localReader()); - return null; - } - }.call(); - - bp.setCompressedFileSize(fs.getBytesUploaded()); } public File createTmpMetaFile() throws IOException { File metafile = File.createTempFile("meta", ".json"); File destFile = new File(metafile.getParent(), "meta.json"); - if (destFile.exists()) - destFile.delete(); + if (destFile.exists()) destFile.delete(); FileUtils.moveFile(metafile, destFile); return destFile; } - public static void addObserver(IMessageObserver observer) { - observers.add(observer); - } - - public static void removeObserver(IMessageObserver observer) { - observers.remove(observer); - } - - private void notifyObservers() { - for (IMessageObserver observer : observers) { - if (observer != null) { - logger.debug("Updating snapshot observers now ..."); - observer.update(BACKUP_MESSAGE_TYPE.META, metaRemotePaths); - } else - logger.info("Observer is Null, hence can not notify ..."); - } - } - private void addToRemotePath(String remotePath) { metaRemotePaths.add(remotePath); } - - public List toJson(File input) { - List files = Lists.newArrayList(); - try { - JSONArray jsonObj = (JSONArray) new JSONParser().parse(new FileReader(input)); - for (int i = 0; i < jsonObj.size(); i++) { - AbstractBackupPath p = pathFactory.get(); - p.parseRemote((String) jsonObj.get(i)); - files.add(p); - } - - } catch (Exception ex) { - throw new RuntimeException("Error transforming file " + input.getAbsolutePath() + " to JSON format. Msg:" + ex.getLocalizedMessage(), ex); - } - - logger.debug("Transformed file {} to JSON. Number of JSON elements: {}", input.getAbsolutePath(), files.size()); - return files; - } } diff --git a/priam/src/main/java/com/netflix/priam/backup/RangeReadInputStream.java b/priam/src/main/java/com/netflix/priam/backup/RangeReadInputStream.java index d2e41af46..bb771c151 100644 --- a/priam/src/main/java/com/netflix/priam/backup/RangeReadInputStream.java +++ b/priam/src/main/java/com/netflix/priam/backup/RangeReadInputStream.java @@ -20,55 +20,48 @@ import com.amazonaws.services.s3.model.GetObjectRequest; import com.amazonaws.services.s3.model.S3ObjectInputStream; import com.netflix.priam.utils.RetryableCallable; -import org.apache.commons.io.IOUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.io.IOException; import java.io.InputStream; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * An implementation of InputStream that will request explicit byte ranges of the target file. - * This will make it easier to retry a failed read - which is important if we don't want to \ - * throw away a 100Gb file and restart after reading 99Gb and failing. + * An implementation of InputStream that will request explicit byte ranges of the target file. 
This + * will make it easier to retry a failed read - which is important if we don't want to throw away + * a 100Gb file and restart after reading 99Gb and failing. */ public class RangeReadInputStream extends InputStream { private static final Logger logger = LoggerFactory.getLogger(RangeReadInputStream.class); private final AmazonS3 s3Client; private final String bucketName; - private final AbstractBackupPath path; + private final long fileSize; + private final String remotePath; private long offset; - public RangeReadInputStream(AmazonS3 s3Client, String bucketName, AbstractBackupPath path) { + public RangeReadInputStream( + AmazonS3 s3Client, String bucketName, long fileSize, String remotePath) { this.s3Client = s3Client; this.bucketName = bucketName; - this.path = path; + this.fileSize = fileSize; + this.remotePath = remotePath; } public int read(final byte b[], final int off, final int len) throws IOException { -// logger.info(String.format("incoming buf req's size = %d, off = %d, len to read = %d, on file size %d, cur offset = %d path = %s", -// b.length, off, len, path.getSize(), offset, path.getRemotePath())); - final long fileSize = path.getSize(); - if (fileSize > 0 && offset >= fileSize) - return -1; + if (fileSize > 0 && offset >= fileSize) return -1; final long firstByte = offset; long curEndByte = firstByte + len; curEndByte = curEndByte <= fileSize ? curEndByte : fileSize; - //need to subtract one as the call to getRange is inclusive - //meaning if you want to download the first 10 bytes of a file, request bytes 0..9 + // need to subtract one as the call to getRange is inclusive + // meaning if you want to download the first 10 bytes of a file, request bytes 0..9 final long endByte = curEndByte - 1; -// logger.info(String.format("start byte = %d, end byte = %d", firstByte, endByte)); try { - Integer cnt = new RetryableCallable() { + return new RetryableCallable() { public Integer retriableCall() throws IOException { - GetObjectRequest req = new GetObjectRequest(bucketName, path.getRemotePath()); + GetObjectRequest req = new GetObjectRequest(bucketName, remotePath); req.setRange(firstByte, endByte); - S3ObjectInputStream is = null; - try { - is = s3Client.getObject(req).getObjectContent(); - + try (S3ObjectInputStream is = s3Client.getObject(req).getObjectContent()) { byte[] readBuf = new byte[4092]; int rCnt; int readTotal = 0; @@ -77,22 +70,18 @@ public Integer retriableCall() throws IOException { System.arraycopy(readBuf, 0, b, incomingOffet, rCnt); readTotal += rCnt; incomingOffet += rCnt; -// logger.info(" local read cnt = " + rCnt + "Current Thread Name = "+Thread.currentThread().getName()); } - if (readTotal == 0 && rCnt == -1) - return -1; + if (readTotal == 0 && rCnt == -1) return -1; offset += readTotal; - return Integer.valueOf(readTotal); - } finally { - IOUtils.closeQuietly(is); + return readTotal; } } }.call(); -// logger.info("read cnt = " + cnt); - return cnt.intValue(); } catch (Exception e) { - String msg = String.format("failed to read offset range %d-%d of file %s whose size is %d", - firstByte, endByte, path.getRemotePath(), path.getSize()); + String msg = + String.format( + "failed to read offset range %d-%d of file %s whose size is %d", + firstByte, endByte, remotePath, fileSize); throw new IOException(msg, e); } } @@ -102,4 +91,3 @@ public int read() throws IOException { return -1; } } - diff --git a/priam/src/main/java/com/netflix/priam/backup/SnapshotBackup.java b/priam/src/main/java/com/netflix/priam/backup/SnapshotBackup.java index 
58cb4aade..fdde89fa8 100644 --- a/priam/src/main/java/com/netflix/priam/backup/SnapshotBackup.java +++ b/priam/src/main/java/com/netflix/priam/backup/SnapshotBackup.java @@ -16,104 +16,141 @@ */ package com.netflix.priam.backup; +import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; +import com.google.common.util.concurrent.ListenableFuture; import com.google.inject.Inject; -import com.google.inject.Provider; import com.google.inject.Singleton; -import com.netflix.priam.config.IConfiguration; import com.netflix.priam.backup.AbstractBackupPath.BackupFileType; -import com.netflix.priam.backup.IMessageObserver.BACKUP_MESSAGE_TYPE; -import com.netflix.priam.defaultimpl.CassandraOperations; +import com.netflix.priam.backupv2.ForgottenFilesManager; +import com.netflix.priam.config.IConfiguration; +import com.netflix.priam.connection.CassandraOperations; +import com.netflix.priam.health.CassandraMonitor; import com.netflix.priam.identity.InstanceIdentity; import com.netflix.priam.scheduler.CronTimer; import com.netflix.priam.scheduler.TaskTimer; -import com.netflix.priam.utils.CassandraMonitor; +import com.netflix.priam.utils.DateUtil; import com.netflix.priam.utils.ThreadSleeper; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.io.File; +import java.nio.file.DirectoryStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.time.Instant; import java.util.*; +import java.util.concurrent.Future; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; +import org.apache.commons.io.FileUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -/** - * Task for running daily snapshots - */ +/** Task for running daily snapshots */ @Singleton public class SnapshotBackup extends AbstractBackup { private static final Logger logger = LoggerFactory.getLogger(SnapshotBackup.class); public static final String JOBNAME = "SnapshotBackup"; private final MetaData metaData; - private final List snapshotRemotePaths = new ArrayList(); - private static List observers = new ArrayList(); private final ThreadSleeper sleeper = new ThreadSleeper(); private static final long WAIT_TIME_MS = 60 * 1000 * 10; - private InstanceIdentity instanceIdentity; - private IBackupStatusMgr snapshotStatusMgr; - private BackupRestoreUtil backupRestoreUtil; + private final InstanceIdentity instanceIdentity; + private final IBackupStatusMgr snapshotStatusMgr; + private final BackupRestoreUtil backupRestoreUtil; + private final ForgottenFilesManager forgottenFilesManager; private String snapshotName = null; + private Instant snapshotInstant = DateUtil.getInstant(); private List abstractBackupPaths = null; - private CassandraOperations cassandraOperations; + private final CassandraOperations cassandraOperations; + private final BackupHelper backupHelper; + private static final Lock lock = new ReentrantLock(); @Inject - public SnapshotBackup(IConfiguration config, Provider pathFactory, - MetaData metaData, IFileSystemContext backupFileSystemCtx - , IBackupStatusMgr snapshotStatusMgr - , InstanceIdentity instanceIdentity, CassandraOperations cassandraOperations) { - super(config, backupFileSystemCtx, pathFactory); + public SnapshotBackup( + IConfiguration config, + BackupHelper backupHelper, + MetaData metaData, + IBackupStatusMgr snapshotStatusMgr, + InstanceIdentity instanceIdentity, + CassandraOperations cassandraOperations, + ForgottenFilesManager forgottenFilesManager) { + super(config); + this.backupHelper = 
backupHelper; this.metaData = metaData; this.snapshotStatusMgr = snapshotStatusMgr; this.instanceIdentity = instanceIdentity; this.cassandraOperations = cassandraOperations; - backupRestoreUtil = new BackupRestoreUtil(config.getSnapshotKeyspaceFilters(), config.getSnapshotCFFilter()); + backupRestoreUtil = + new BackupRestoreUtil( + config.getSnapshotIncludeCFList(), config.getSnapshotExcludeCFList()); + this.forgottenFilesManager = forgottenFilesManager; } @Override public void execute() throws Exception { - //If Cassandra is started then only start Snapshot Backup + // If Cassandra is started then only start Snapshot Backup while (!CassandraMonitor.hasCassadraStarted()) { - logger.debug("Cassandra has not yet started, hence Snapshot Backup will start after [" + WAIT_TIME_MS / 1000 + "] secs ..."); + logger.debug( + "Cassandra has not yet started, hence Snapshot Backup will start after [" + + WAIT_TIME_MS / 1000 + + "] secs ..."); sleeper.sleep(WAIT_TIME_MS); } + // Do not allow more than one snapshot to run at the same time. This is possible as this + // happens on CRON. + if (!lock.tryLock()) { + logger.warn("Snapshot Operation is already running! Try again later."); + throw new Exception("Snapshot Operation already running"); + } + + try { + // Clean up all the backup directories, if any. + cleanOldBackups(config); + executeSnapshot(); + } finally { + lock.unlock(); + } + } + + private void executeSnapshot() throws Exception { Date startTime = Calendar.getInstance(TimeZone.getTimeZone("GMT")).getTime(); - snapshotName = pathFactory.get().formatDate(startTime); + snapshotName = DateUtil.formatyyyyMMddHHmm(startTime); + snapshotInstant = DateUtil.getInstant(); String token = instanceIdentity.getInstance().getToken(); // Save start snapshot status - BackupMetadata backupMetadata = new BackupMetadata(token, startTime); + BackupMetadata backupMetadata = + new BackupMetadata(BackupVersion.SNAPSHOT_BACKUP, token, startTime); snapshotStatusMgr.start(backupMetadata); try { logger.info("Starting snapshot {}", snapshotName); - //Clearing remotePath List - snapshotRemotePaths.clear(); cassandraOperations.takeSnapshot(snapshotName); + backupMetadata.setCassandraSnapshotSuccess(true); // Collect all snapshot dir's under keyspace dir's abstractBackupPaths = Lists.newArrayList(); - // Try to upload all the files as part of snapshot. If there is any error, there will be an exception and snapshot will be considered as failure. + // Try to upload all the files as part of snapshot. If there is any error, there will be + // an exception and snapshot will be considered as failure. initiateBackup(SNAPSHOT_FOLDER, backupRestoreUtil); - // All the files are uploaded successfully as part of snapshot. 
- //pre condition notify of meta.json upload - File tmpMetaFile = metaData.createTmpMetaFile(); //Note: no need to remove this temp as it is done within createTmpMetaFile() + // pre condition notify of meta.json upload + File tmpMetaFile = metaData.createTmpMetaFile(); + // Note: no need to remove this temp as it is done within createTmpMetaFile() AbstractBackupPath metaJsonAbp = metaData.decorateMetaJson(tmpMetaFile, snapshotName); - // Upload meta file AbstractBackupPath metaJson = metaData.set(abstractBackupPaths, snapshotName); logger.info("Snapshot upload complete for {}", snapshotName); - backupMetadata.setSnapshotLocation(config.getBackupPrefix() + File.separator + metaJson.getRemotePath()); + backupMetadata.setSnapshotLocation( + config.getBackupPrefix() + File.separator + metaJson.getRemotePath()); snapshotStatusMgr.finish(backupMetadata); - - if (snapshotRemotePaths.size() > 0) { - notifyObservers(); - } - } catch (Exception e) { - logger.error("Exception occurred while taking snapshot: {}. Exception: {}", snapshotName, e.getLocalizedMessage()); + logger.error( + "Exception occurred while taking snapshot: {}. Exception: {}", + snapshotName, + e.getLocalizedMessage()); snapshotStatusMgr.failed(backupMetadata); throw e; } finally { @@ -127,12 +164,10 @@ public void execute() throws Exception { private File getValidSnapshot(File snpDir, String snapshotName) { for (File snapshotDir : snpDir.listFiles()) - if (snapshotDir.getName().matches(snapshotName)) - return snapshotDir; + if (snapshotDir.getName().matches(snapshotName)) return snapshotDir; return null; } - @Override public String getName() { return JOBNAME; @@ -143,54 +178,50 @@ public static boolean isBackupEnabled(IConfiguration config) throws Exception { } public static TaskTimer getTimer(IConfiguration config) throws Exception { - CronTimer cronTimer = null; - switch (config.getBackupSchedulerType()) { - case HOUR: - if (config.getBackupHour() < 0) - logger.info("Skipping {} as it is disabled via backup hour: {}", JOBNAME, config.getBackupHour()); - else { - cronTimer = new CronTimer(JOBNAME, config.getBackupHour(), 1, 0); - logger.info("Starting snapshot backup with backup hour: {}", config.getBackupHour()); - } - break; - case CRON: - cronTimer = CronTimer.getCronTimer(JOBNAME, config.getBackupCronExpression()); - break; + TaskTimer timer = CronTimer.getCronTimer(JOBNAME, config.getBackupCronExpression()); + if (timer == null) { + // Clean up all the backup directories, if any. 
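+ // A null timer means snapshot backups are disabled via the cron expression, so any leftover snapshot directories can be removed.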
+ cleanOldBackups(config); } - return cronTimer; + return timer; } - public static void addObserver(IMessageObserver observer) { - observers.add(observer); + private static void cleanOldBackups(IConfiguration configuration) throws Exception { + Set<Path> backupPaths = AbstractBackup.getBackupDirectories(configuration, SNAPSHOT_FOLDER); + for (Path backupDirPath : backupPaths) + try (DirectoryStream<Path> directoryStream = + Files.newDirectoryStream(backupDirPath, path -> Files.isDirectory(path))) { + for (Path backupDir : directoryStream) { + if (isValidBackupDir(backupDir)) { + FileUtils.deleteDirectory(backupDir.toFile()); + } + } + } } - public static void removeObserver(IMessageObserver observer) { - observers.remove(observer); - } + @Override + protected void processColumnFamily(File backupDir) throws Exception { + File snapshotDir = getValidSnapshot(backupDir, snapshotName); - private void notifyObservers() { - for (IMessageObserver observer : observers) { - if (observer != null) { - logger.debug("Updating snapshot observers now ..."); - observer.update(BACKUP_MESSAGE_TYPE.SNAPSHOT, snapshotRemotePaths); - } else - logger.info("Observer is Null, hence can not notify ...") + if (snapshotDir == null) { + logger.warn("{} folder does not contain {} snapshots", backupDir, snapshotName); + return; } - } - @Override - protected void processColumnFamily(String keyspace, String columnFamily, File backupDir) throws Exception { - - File snapshotDir = getValidSnapshot(backupDir, snapshotName); + forgottenFilesManager.findAndMoveForgottenFiles(snapshotInstant, snapshotDir); // Add files to this dir - if (null != snapshotDir) - abstractBackupPaths.addAll(upload(snapshotDir, BackupFileType.SNAP)); - else - logger.warn("{} folder does not contain {} snapshots", backupDir, snapshotName); + + ImmutableList<Future<AbstractBackupPath>> futures = + backupHelper.uploadAndDeleteAllFiles( + snapshotDir, BackupFileType.SNAP, config.enableAsyncSnapshot()); + for (Future<AbstractBackupPath> future : futures) { + abstractBackupPaths.add(future.get()); + } } - @Override - protected void addToRemotePath(String remotePath) { - snapshotRemotePaths.add(remotePath); + private static boolean isValidBackupDir(Path backupDir) { + String backupDirName = backupDir.toFile().getName(); + // Check if it is of the format yyyyMMddHHmm + return (DateUtil.getDate(backupDirName) != null); } } diff --git a/priam/src/main/java/com/netflix/priam/backup/SnapshotDirectorySize.java b/priam/src/main/java/com/netflix/priam/backup/SnapshotDirectorySize.java new file mode 100644 index 000000000..ab77cfe4a --- /dev/null +++ b/priam/src/main/java/com/netflix/priam/backup/SnapshotDirectorySize.java @@ -0,0 +1,50 @@ +package com.netflix.priam.backup; + +import java.io.IOException; +import java.nio.file.*; +import java.nio.file.attribute.BasicFileAttributes; + +/** Estimates remaining bytes to upload in a backup by looking at the file system */ +public class SnapshotDirectorySize implements DirectorySize { + + public long getBytes(String location) { + SummingFileVisitor fileVisitor = new SummingFileVisitor(); + try { + Files.walkFileTree(Paths.get(location), fileVisitor); + } catch (IOException e) { + // BackupFileVisitor is happy with an estimate and won't produce these in practice.
+ } + return fileVisitor.getTotalBytes(); + } + + private static final class SummingFileVisitor implements FileVisitor<Path> { + private long totalBytes; + + @Override + public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) { + return FileVisitResult.CONTINUE; + } + + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) { + if (file.toString().contains(AbstractBackup.SNAPSHOT_FOLDER) && attrs.isRegularFile()) { + totalBytes += attrs.size(); + } + return FileVisitResult.CONTINUE; + } + + @Override + public FileVisitResult visitFileFailed(Path file, IOException exc) { + return FileVisitResult.CONTINUE; + } + + @Override + public FileVisitResult postVisitDirectory(Path dir, IOException exc) { + return FileVisitResult.CONTINUE; + } + + long getTotalBytes() { + return totalBytes; + } + } +} diff --git a/priam/src/main/java/com/netflix/priam/backup/Status.java b/priam/src/main/java/com/netflix/priam/backup/Status.java index ea2eba1f3..57cd1768b 100644 --- a/priam/src/main/java/com/netflix/priam/backup/Status.java +++ b/priam/src/main/java/com/netflix/priam/backup/Status.java @@ -16,20 +16,15 @@ */ package com.netflix.priam.backup; -/** - * Enum to describe the status of the snapshot/restore. - */ +/** Enum to describe the status of the snapshot/restore. */ public enum Status { - /** - * Denotes snapshot/restore has started successfully and is running. - */ + /** Denotes snapshot/restore has started successfully and is running. */ STARTED, - /** - * Denotes snapshot/restore has finished successfully. - */ + /** Denotes snapshot/restore has finished successfully. */ FINISHED, /** - * Denotes snapshot/restore has failed to upload/restore successfully or there was a failure marking the snapshot/restore as failure. + * Denotes snapshot/restore has failed to upload/restore successfully or there was a failure + * marking the snapshot/restore as failure. */ FAILED } diff --git a/priam/src/main/java/com/netflix/priam/backup/parallel/BackupPostProcessingCallback.java b/priam/src/main/java/com/netflix/priam/backup/parallel/BackupPostProcessingCallback.java deleted file mode 100644 index eee1661ea..000000000 --- a/priam/src/main/java/com/netflix/priam/backup/parallel/BackupPostProcessingCallback.java +++ /dev/null @@ -1,24 +0,0 @@ -/** - * Copyright 2017 Netflix, Inc. - *
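As a side note, the walk in SnapshotDirectorySize above can be written more compactly with java.nio's SimpleFileVisitor, which supplies the CONTINUE defaults; a standalone sketch of the same summing technique (the root path is a placeholder):

import java.io.IOException;
import java.nio.file.*;
import java.nio.file.attribute.BasicFileAttributes;

public class DirectoryBytes {
    public static void main(String[] args) throws IOException {
        final long[] total = {0};
        Path root = Paths.get(args.length > 0 ? args[0] : ".");
        // SimpleFileVisitor provides CONTINUE defaults, so only the interesting callbacks need overriding.
        Files.walkFileTree(root, new SimpleFileVisitor<Path>() {
            @Override
            public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) {
                if (attrs.isRegularFile()) total[0] += attrs.size();
                return FileVisitResult.CONTINUE;
            }

            @Override
            public FileVisitResult visitFileFailed(Path file, IOException exc) {
                // Skip unreadable entries; a partial sum is fine for an estimate.
                return FileVisitResult.CONTINUE;
            }
        });
        System.out.println(total[0] + " bytes under " + root);
    }
}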

- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.netflix.priam.backup.parallel; - -/* - * Encapsules one to many steps needed once an upload is completed. - */ -public interface BackupPostProcessingCallback { - - void postProcessing(E completedTask); -} \ No newline at end of file diff --git a/priam/src/main/java/com/netflix/priam/backup/parallel/CassandraBackupQueueMgr.java b/priam/src/main/java/com/netflix/priam/backup/parallel/CassandraBackupQueueMgr.java deleted file mode 100644 index 1940a706c..000000000 --- a/priam/src/main/java/com/netflix/priam/backup/parallel/CassandraBackupQueueMgr.java +++ /dev/null @@ -1,133 +0,0 @@ -/** - * Copyright 2017 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.netflix.priam.backup.parallel; - -import com.google.inject.Inject; -import com.google.inject.Singleton; -import com.netflix.priam.config.IConfiguration; -import com.netflix.priam.backup.AbstractBackupPath; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.AbstractSet; -import java.util.Date; -import java.util.HashSet; -import java.util.concurrent.ArrayBlockingQueue; -import java.util.concurrent.BlockingQueue; - -/* - * - * Represents a queue of files (incrementals, snapshots, meta files) to be uploaded. - * - * Duties of the mgr include: - * - Mechanism to add a task, including de-duplication of tasks before adding to queue. - * - Guarantee delivery of task to only one consumer. - * - Provide relevant metrics including number of tasks in queue, etc. - */ - -@Singleton -public class CassandraBackupQueueMgr implements ITaskQueueMgr { - - private static final Logger logger = LoggerFactory.getLogger(CassandraBackupQueueMgr.class); - - private BlockingQueue tasks; //A queue of files to be uploaded - private AbstractSet tasksQueued; //A queue to determine what files have been queued, used for deduplication - - @Inject - public CassandraBackupQueueMgr(IConfiguration config) { - tasks = new ArrayBlockingQueue(config.getUncrementalBkupQueueSize()); - tasksQueued = new HashSet(config.getUncrementalBkupQueueSize()); //Key to task is the S3 absolute path (BASE/REGION/CLUSTER/TOKEN/[yyyymmddhhmm]/[SST|SNP|META]/KEYSPACE/COLUMNFAMILY/FILE - } - - @Override - /* - * Add task to queue if it does not already exist. For performance reasons, this behavior does not acquire a lock on the queue hence - * it is up to the caller to handle possible duplicate tasks. - * - * Note: will block until there is space in the queue. - */ - public void add(AbstractBackupPath task) { - if (!tasksQueued.contains(task.getRemotePath())) { - tasksQueued.add(task.getRemotePath()); - try { - tasks.put(task); //block until space becomes available in queue - logger.debug("Queued file {} within CF {}", task.getFileName(), task.getColumnFamily()); - - } catch (InterruptedException e) { - logger.warn("Interrupted waiting for the task queue to have free space, not fatal will just move on. Error Msg: {}", e.getLocalizedMessage()); - } - } else { - logger.debug("Already in queue, no-op. File: {}", task.getRemotePath()); - } - - } - - @Override - /* - * Guarantee delivery of a task to only one consumer. - * - * @return task, null if task in queue. - */ - public AbstractBackupPath take() throws InterruptedException { - AbstractBackupPath task = null; - if (!tasks.isEmpty()) { - - synchronized (tasks) { - task = tasks.poll(); //non-blocking call - } - } - - return task; - } - - @Override - /* - * @return true if there are more tasks. - * - * Note: this is a best effort so the caller should call me again just before taking a task. - * We anticipate this method will be invoked at a high frequency hence, making it thread-safe will slow down the appliation or - * worse yet, create a deadlock. For example, caller blocks to determine if there are more tasks and also blocks waiting to dequeue - * the task. 
- */ - public Boolean hasTasks() { - return !tasks.isEmpty(); - } - - @Override - /* - * A means to perform any post processing once the task has been completed. If post processing is needed, - * the consumer should notify this behavior via callback once the task is completed. - * - * *Note: "completed" here can mean success or failure. - */ - public void taskPostProcessing(AbstractBackupPath completedTask) { - this.tasksQueued.remove(completedTask.getRemotePath()); - } - - @Override - /* - * @return num of pending tasks. Note, the result is a best guess, don't rely on it to be 100% accurate. - */ - public Integer getNumOfTasksToBeProessed() { - return tasks.size(); - } - - @Override - public Boolean tasksCompleted(Date date) { - throw new UnsupportedOperationException(); - } - -} \ No newline at end of file diff --git a/priam/src/main/java/com/netflix/priam/backup/parallel/ITaskQueueMgr.java b/priam/src/main/java/com/netflix/priam/backup/parallel/ITaskQueueMgr.java deleted file mode 100644 index ee5ceb135..000000000 --- a/priam/src/main/java/com/netflix/priam/backup/parallel/ITaskQueueMgr.java +++ /dev/null @@ -1,56 +0,0 @@ -/** - * Copyright 2017 Netflix, Inc. - *
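Although CassandraBackupQueueMgr is deleted above, its enqueue pattern, a BlockingQueue paired with a seen-set keyed on the remote path for deduplication, is the crux of the removed producer/consumer design. A minimal standalone sketch of that pattern (the class name and capacity are illustrative, not Priam's):

import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

public class DedupTaskQueue {
    private final BlockingQueue<String> tasks = new ArrayBlockingQueue<>(100);
    private final Set<String> queued = new HashSet<>();

    // Skip keys that are already queued; block when the queue is full.
    public void add(String remotePath) throws InterruptedException {
        if (queued.add(remotePath)) { // add() returns false if the key was already present
            tasks.put(remotePath);
        }
    }

    // Non-blocking hand-off to a consumer; returns null when empty.
    public String take() {
        return tasks.poll();
    }

    // Post-processing after an upload completes, so the same key can be queued again later.
    public void taskPostProcessing(String remotePath) {
        queued.remove(remotePath);
    }
}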

- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.netflix.priam.backup.parallel; - -/* - * - * Represents a queue of tasks to be completed. - * - * Duties of the mgr include: - * - Mechanism to add a task, including deduplication of tasks before adding to queue. - * - Guarantee delivery of task to only one consumer. - * - Provide relevant metrics including number of tasks in queue, number of tasks processed. - */ -public interface ITaskQueueMgr { - - void add(E task); - - /* - * @return task, null if none is available. - */ - E take() throws InterruptedException; - - /* - * @return true if there are tasks within queue to be processed; false otherwise. - */ - Boolean hasTasks(); - - /* - * A means to perform any post processing once the task has been completed. If post processing is needed, - * the consumer should notify this behavior via callback once the task is completed. - * - * *Note: "completed" here can mean success or failure. - */ - void taskPostProcessing(E completedTask); - - - Integer getNumOfTasksToBeProessed(); - - /* - * @return true if all tasks completed (includes failures) for a date; false, if at least 1 task is still in queue. - */ - Boolean tasksCompleted(java.util.Date date); -} \ No newline at end of file diff --git a/priam/src/main/java/com/netflix/priam/backup/parallel/IncrementalBackupProducer.java b/priam/src/main/java/com/netflix/priam/backup/parallel/IncrementalBackupProducer.java deleted file mode 100644 index d6c8d2c7e..000000000 --- a/priam/src/main/java/com/netflix/priam/backup/parallel/IncrementalBackupProducer.java +++ /dev/null @@ -1,150 +0,0 @@ -/** - * Copyright 2017 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.netflix.priam.backup.parallel; - -import com.google.inject.Inject; -import com.google.inject.Provider; -import com.google.inject.Singleton; -import com.google.inject.name.Named; -import com.netflix.priam.config.IConfiguration; -import com.netflix.priam.backup.*; -import com.netflix.priam.backup.AbstractBackupPath.BackupFileType; -import com.netflix.priam.scheduler.SimpleTimer; -import com.netflix.priam.scheduler.TaskTimer; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.util.ArrayList; -import java.util.List; - -@Singleton -public class IncrementalBackupProducer extends AbstractBackup implements IIncrementalBackup { - - public static final String JOBNAME = "ParallelIncremental"; - private static final Logger logger = LoggerFactory.getLogger(IncrementalBackupProducer.class); - - private final List incrementalRemotePaths = new ArrayList(); - private IncrementalMetaData metaData; - private IncrementalConsumerMgr incrementalConsumerMgr; - private ITaskQueueMgr taskQueueMgr; - private BackupRestoreUtil backupRestoreUtil; - - @Inject - public IncrementalBackupProducer(IConfiguration config, Provider pathFactory, IFileSystemContext backupFileSystemCtx - , IncrementalMetaData metaData - , @Named("backup") ITaskQueueMgr taskQueueMgr ) { - - super(config, backupFileSystemCtx, pathFactory); - this.taskQueueMgr = taskQueueMgr; - this.metaData = metaData; - - init(backupFileSystemCtx); - } - - private void init(IFileSystemContext backupFileSystemCtx) { - backupRestoreUtil = new BackupRestoreUtil(config.getIncrementalKeyspaceFilters(), config.getIncrementalCFFilter()); - //"this" is a producer, lets wake up the "consumers" - this.incrementalConsumerMgr = new IncrementalConsumerMgr(this.taskQueueMgr, backupFileSystemCtx.getFileStrategy(config), super.config); - Thread consumerMgr = new Thread(this.incrementalConsumerMgr); - consumerMgr.start(); - - } - - @Override - protected void processColumnFamily(String keyspace, String columnFamily, File backupDir) throws Exception { - for (final File file : backupDir.listFiles()) { - try { - final AbstractBackupPath bp = pathFactory.get(); - bp.parseLocal(file, BackupFileType.SST); - this.taskQueueMgr.add(bp); //producer -- populate the queue of files. *Note: producer will block if queue is full. - } catch (Exception e) { - logger.warn("Unable to queue incremental file, treating as non-fatal and moving on to next. Msg: {} Fail to queue file: {}", - e.getLocalizedMessage(), file.getAbsolutePath()); - } - - } //end enqueuing all incremental files for a CF - } - - @Override - /* - * Keeping track of successfully uploaded files. - */ - protected void addToRemotePath(String remotePath) { - incrementalRemotePaths.add(remotePath); - } - - - @Override - public void execute() throws Exception { - //Clearing remotePath List - incrementalRemotePaths.clear(); - initiateBackup(INCREMENTAL_BACKUP_FOLDER, backupRestoreUtil); - } - - public void postProcessing() { - /* - * - * Upload the audit file of completed uploads - * - List uploadedFiles = upload(backupDir, BackupFileType.SST); - if ( ! 
uploadedFiles.isEmpty() ) { - String incrementalUploadTime = AbstractBackupPath.formatDate(uploadedFiles.get(0).getTime()); //format of yyyymmddhhmm (e.g. 201505060901) - String metaFileName = "meta_" + columnFamilyDir.getName() + "_" + incrementalUploadTime; - logger.info("Uploading meta file for incremental backup: " + metaFileName); - this.metaData.setMetaFileName(metaFileName); - this.metaData.set(uploadedFiles, incrementalUploadTime); - logger.info("Uploaded meta file for incremental backup: " + metaFileName); - } - */ - - /* * - * Notify observers once all incrremental uploads completed - * - if(incrementalRemotePaths.size() > 0) - { - notifyObservers(); - } - * */ - - } - - @Override - /* - * @return an identifier of purpose of the task. - */ - public String getName() { - return JOBNAME; - } - - @Override - public long getNumPendingFiles() { - throw new UnsupportedOperationException(); - } - - /** - * @return Timer that run every 10 Sec - */ - public static TaskTimer getTimer() { - return new SimpleTimer(JOBNAME, INCREMENTAL_INTERVAL_IN_MILLISECS); - } - - @Override - public String getJobName() { - return JOBNAME; - } - -} \ No newline at end of file diff --git a/priam/src/main/java/com/netflix/priam/backup/parallel/IncrementalBkupPostProcessing.java b/priam/src/main/java/com/netflix/priam/backup/parallel/IncrementalBkupPostProcessing.java deleted file mode 100644 index 37a5d9878..000000000 --- a/priam/src/main/java/com/netflix/priam/backup/parallel/IncrementalBkupPostProcessing.java +++ /dev/null @@ -1,33 +0,0 @@ -/** - * Copyright 2017 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.netflix.priam.backup.parallel; - -import com.netflix.priam.backup.AbstractBackupPath; - -public class IncrementalBkupPostProcessing implements BackupPostProcessingCallback { - - private ITaskQueueMgr taskQueueMgr; - - public IncrementalBkupPostProcessing(ITaskQueueMgr taskQueueMgr) { - this.taskQueueMgr = taskQueueMgr; - } - - @Override - public void postProcessing(AbstractBackupPath completedTask) { - this.taskQueueMgr.taskPostProcessing(completedTask); - } - -} \ No newline at end of file diff --git a/priam/src/main/java/com/netflix/priam/backup/parallel/IncrementalConsumer.java b/priam/src/main/java/com/netflix/priam/backup/parallel/IncrementalConsumer.java deleted file mode 100644 index e205f303f..000000000 --- a/priam/src/main/java/com/netflix/priam/backup/parallel/IncrementalConsumer.java +++ /dev/null @@ -1,103 +0,0 @@ -/** - * Copyright 2017 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.netflix.priam.backup.parallel; - -import com.netflix.priam.backup.AbstractBackupPath; -import com.netflix.priam.backup.IBackupFileSystem; -import com.netflix.priam.utils.RetryableCallable; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/* - * Performs an upload of a file, with retries. - */ -public class IncrementalConsumer implements Runnable { - private static final Logger logger = LoggerFactory.getLogger(IncrementalConsumer.class); - - private AbstractBackupPath bp; - private IBackupFileSystem fs; - private BackupPostProcessingCallback callback; - - /** - * Upload files. Does not delete the file in case of - * error. - * - * @param bp - the file to upload with additional metada - */ - public IncrementalConsumer(AbstractBackupPath bp, IBackupFileSystem fs - , BackupPostProcessingCallback callback - ) { - this.bp = bp; - this.bp.setType(AbstractBackupPath.BackupFileType.SST); //Tag this is an incremental upload, not snapshot - this.fs = fs; - this.callback = callback; - } - - @Override - /* - * Upload specified file, with retries logic. - * File will be deleted only if uploaded successfully. - */ - public void run() { - - logger.info("Consumer - about to upload file: {}", this.bp.getFileName()); - - try { - - new RetryableCallable() { - @Override - public Void retriableCall() throws Exception { - - java.io.InputStream is = null; - try { - is = bp.localReader(); - } catch (java.io.FileNotFoundException | RuntimeException e) { - if (is != null) { - is.close(); - } - throw new java.util.concurrent.CancellationException("Someone beat me to uploading this file" - + ", no need to retry. Most likely not needed but to be safe, checked and released handle to file if appropriate."); - } - - try { - if (is == null) { - throw new NullPointerException("Unable to get handle on file: " + bp.getFileName()); - } - fs.upload(bp, is); - bp.setCompressedFileSize(fs.getBytesUploaded()); - return null; - } catch (Exception e) { - logger.error("Exception uploading local file {}, releasing handle, and will retry.", bp.getFileName()); - if (is != null) { - is.close(); - } - throw e; - } - } - }.call(); - - this.bp.getBackupFile().delete(); //resource cleanup - this.callback.postProcessing(bp); //post processing - } catch (Exception e) { - if (e instanceof java.util.concurrent.CancellationException) { - logger.debug("Failed to upload local file {}. Ignoring to continue with rest of backup. Msg: {}", this.bp.getFileName(), e.getLocalizedMessage()); - } else { - logger.error("Failed to upload local file {}. Ignoring to continue with rest of backup. Msg: {}", this.bp.getFileName(), e.getLocalizedMessage()); - } - } - } - -} \ No newline at end of file diff --git a/priam/src/main/java/com/netflix/priam/backup/parallel/IncrementalConsumerMgr.java b/priam/src/main/java/com/netflix/priam/backup/parallel/IncrementalConsumerMgr.java deleted file mode 100644 index c887bd974..000000000 --- a/priam/src/main/java/com/netflix/priam/backup/parallel/IncrementalConsumerMgr.java +++ /dev/null @@ -1,102 +0,0 @@ -/** - * Copyright 2017 Netflix, Inc. - *
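The consumer being removed above wraps each upload in Priam's RetryableCallable; stripped of the stream handling, the control flow reduces to a bounded retry loop. A standalone sketch of that shape (the attempt count and sleep are illustrative, not Priam's defaults):

import java.util.concurrent.Callable;

public class RetryLoop {
    // Run the callable up to maxAttempts times, sleeping between failed attempts.
    static <T> T callWithRetries(Callable<T> callable, int maxAttempts, long sleepMillis)
            throws Exception {
        Exception last = null;
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
            try {
                return callable.call();
            } catch (Exception e) {
                last = e;
                if (attempt < maxAttempts) Thread.sleep(sleepMillis);
            }
        }
        throw last; // all attempts failed; surface the last error
    }

    public static void main(String[] args) throws Exception {
        String result = callWithRetries(() -> "uploaded", 3, 100);
        System.out.println(result);
    }
}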

- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.netflix.priam.backup.parallel; - -import com.netflix.priam.config.IConfiguration; -import com.netflix.priam.backup.AbstractBackupPath; -import com.netflix.priam.backup.IBackupFileSystem; -import com.netflix.priam.backup.IIncrementalBackup; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.concurrent.*; -import java.util.concurrent.atomic.AtomicBoolean; - -/* - * Monitors files to be uploaded and assigns each file to a worker - */ -public class IncrementalConsumerMgr implements Runnable { - - private static final Logger logger = LoggerFactory.getLogger(IncrementalConsumerMgr.class); - - private AtomicBoolean run = new AtomicBoolean(true); - private ThreadPoolExecutor executor; - private IBackupFileSystem fs; - private ITaskQueueMgr taskQueueMgr; - private BackupPostProcessingCallback callback; - - public IncrementalConsumerMgr(ITaskQueueMgr taskQueueMgr, IBackupFileSystem fs - , IConfiguration config - ) { - this.taskQueueMgr = taskQueueMgr; - this.fs = fs; - - /* - * Too few threads, the queue will build up, consuming a lot of memory. - * Too many threads on the other hand will slow down the whole system due to excessive context switches - and lead to same symptoms. - */ - int maxWorkers = config.getIncrementalBkupMaxConsumers(); - /* - * ThreadPoolExecutor will move the file to be uploaded as a Runnable task in the work queue. - */ - BlockingQueue workQueue = new ArrayBlockingQueue(config.getIncrementalBkupMaxConsumers() * 2); - /* - * If there all workers are busy, the calling thread for the submit() will itself upload the file. This is a way to throttle how many files are moved to the - * worker queue. Specifically, the calling will continue to perform the upload unless a worker is avaialble. - */ - RejectedExecutionHandler rejectedExecutionHandler = new ThreadPoolExecutor.CallerRunsPolicy(); - executor = new ThreadPoolExecutor(maxWorkers, maxWorkers, 60, TimeUnit.SECONDS, - workQueue, rejectedExecutionHandler); - - callback = new IncrementalBkupPostProcessing(this.taskQueueMgr); - } - - /* - * Stop looking for files to upload - */ - public void shutdown() { - this.run.set(false); - this.executor.shutdown(); //will not accept new task and waits for active threads to be completed before shutdown. - } - - @Override - public void run() { - while (this.run.get()) { - - while (this.taskQueueMgr.hasTasks()) { - try { - AbstractBackupPath bp = this.taskQueueMgr.take(); - - IncrementalConsumer task = new IncrementalConsumer(bp, this.fs, this.callback); - executor.submit(task); //non-blocking, will be rejected if the task cannot be scheduled - - - } catch (InterruptedException e) { - logger.warn("Was interrupted while wating to dequeued a task. Msgl: {}", e.getLocalizedMessage()); - } - } - - //Lets not overwhelmend the node hence we will pause before checking the work queue again. - try { - Thread.currentThread().sleep(IIncrementalBackup.INCREMENTAL_INTERVAL_IN_MILLISECS); - } catch (InterruptedException e) { - logger.warn("Was interrupted while sleeping until next interval run. 
Msgl: {}", e.getLocalizedMessage()); - } - } - - } - -} \ No newline at end of file diff --git a/priam/src/main/java/com/netflix/priam/backupv2/BackupTTLTask.java b/priam/src/main/java/com/netflix/priam/backupv2/BackupTTLTask.java new file mode 100644 index 000000000..534bbff0a --- /dev/null +++ b/priam/src/main/java/com/netflix/priam/backupv2/BackupTTLTask.java @@ -0,0 +1,274 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.priam.backupv2; + +import com.google.inject.Inject; +import com.google.inject.Provider; +import com.google.inject.Singleton; +import com.netflix.priam.backup.*; +import com.netflix.priam.config.IBackupRestoreConfig; +import com.netflix.priam.config.IConfiguration; +import com.netflix.priam.health.InstanceState; +import com.netflix.priam.identity.token.TokenRetriever; +import com.netflix.priam.scheduler.SimpleTimer; +import com.netflix.priam.scheduler.Task; +import com.netflix.priam.scheduler.TaskTimer; +import com.netflix.priam.utils.DateUtil; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.time.Instant; +import java.time.temporal.ChronoUnit; +import java.util.*; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; +import javax.inject.Named; +import org.apache.commons.io.FileUtils; +import org.apache.commons.lang3.math.Fraction; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * This class is used to TTL or delete the SSTable components from the backups after they are not + * referenced in the backups for more than {@link IConfiguration#getBackupRetentionDays()}. This + * operation is executed on CRON and is configured via {@link + * IBackupRestoreConfig#getBackupTTLMonitorPeriodInSec()}. + * + *

To TTL the SSTable components we refer to the first manifest file on the remote file system + * after the TTL period. Any sstable components referenced in that manifest file should not be + * deleted. Any other sstable components (files) on remote file system before the TTL period can be + * safely deleted. Created by aagrawal on 11/26/18. + */ +@Singleton +public class BackupTTLTask extends Task { + private static final Logger logger = LoggerFactory.getLogger(BackupTTLTask.class); + private IBackupRestoreConfig backupRestoreConfig; + private IMetaProxy metaProxy; + private IBackupFileSystem fileSystem; + private Provider abstractBackupPathProvider; + private InstanceState instanceState; + public static final String JOBNAME = "BackupTTLService"; + private Map filesInMeta = new HashMap<>(); + private List filesToDelete = new ArrayList<>(); + private static final Lock lock = new ReentrantLock(); + private final int BATCH_SIZE = 1000; + private final Instant start_of_feature = DateUtil.parseInstant("201801010000"); + private final int maxWaitMillis; + + @Inject + public BackupTTLTask( + IConfiguration configuration, + IBackupRestoreConfig backupRestoreConfig, + @Named("v2") IMetaProxy metaProxy, + IFileSystemContext backupFileSystemCtx, + Provider abstractBackupPathProvider, + InstanceState instanceState, + TokenRetriever tokenRetriever) + throws Exception { + super(configuration); + this.backupRestoreConfig = backupRestoreConfig; + this.metaProxy = metaProxy; + this.fileSystem = backupFileSystemCtx.getFileStrategy(configuration); + this.abstractBackupPathProvider = abstractBackupPathProvider; + this.instanceState = instanceState; + this.maxWaitMillis = + 1_000 + * backupRestoreConfig.getBackupTTLMonitorPeriodInSec() + / tokenRetriever.getRingPosition().getDenominator(); + } + + @Override + public void execute() throws Exception { + if (instanceState.getRestoreStatus() != null + && instanceState.getRestoreStatus().getStatus() != null + && instanceState.getRestoreStatus().getStatus() == Status.STARTED) { + logger.info("Not executing the TTL Task for backups as Priam is in restore mode."); + return; + } + + // Do not allow more than one backupTTLService to run at the same time. This is possible + // as this happens on CRON. + if (!lock.tryLock()) { + logger.warn("{} is already running! Try again later.", JOBNAME); + throw new Exception(JOBNAME + " already running"); + } + + // Sleep a random amount but not so long that it will spill into the next token's turn. + if (maxWaitMillis > 0) Thread.sleep(new Random().nextInt(maxWaitMillis)); + + try { + filesInMeta.clear(); + filesToDelete.clear(); + + Instant dateToTtl = + DateUtil.getInstant().minus(config.getBackupRetentionDays(), ChronoUnit.DAYS); + + // Find the snapshot just after this date. + List metas = + metaProxy.findMetaFiles( + new DateUtil.DateRange(dateToTtl, DateUtil.getInstant())); + + if (metas.size() == 0) { + logger.info("No meta file found and thus cannot run TTL Service"); + return; + } + + // Get the first file after the TTL time as we get files which are sorted latest to + // oldest. + AbstractBackupPath metaFile = metas.get(metas.size() - 1); + + // Download the meta file to local file system. + Path localFile = metaProxy.downloadMetaFile(metaFile); + + // Walk over the file system iterator and if not in map, it is eligible for delete. + new MetaFileWalker().readMeta(localFile); + + logger.info("No. 
of component files loaded from meta file: {}", filesInMeta.size()); + + // Delete the meta file downloaded locally + FileUtils.deleteQuietly(localFile.toFile()); + + // If there are no files listed in meta, do not delete. This could be a bug!! + if (filesInMeta.isEmpty()) { + logger.warn("Meta file was empty. This should not happen. Getting out!!"); + return; + } + + // Delete the old META files. We pass a start date far enough in the past to get + // all the META files. + // This feature did not exist in Jan 2018. + metas = + metaProxy.findMetaFiles( + new DateUtil.DateRange( + start_of_feature, dateToTtl.minus(1, ChronoUnit.HOURS))); + + if (metas != null && metas.size() != 0) { + logger.info( + "Will delete(TTL) {} META files starting from: [{}]", + metas.size(), + metas.get(metas.size() - 1).getLastModified()); + for (AbstractBackupPath meta : metas) { + deleteFile(meta, false); + } + } + + Iterator<String> remoteFileLocations = + fileSystem.listFileSystem(getSSTPrefix(), null, null); + + /* + We really cannot delete the files until the TTL period. + Cassandra can flush files to the file system like Index.db first and other component files later (even 30 minutes apart). If there is a snapshot in between, then this "single" component file would not be part of the snapshot as the SSTable is still not part of Cassandra's "view". This would be safe only if Cassandra could provide strong guarantees on the file system such that: + + 1. All components will be flushed to disk as real SSTables only if they are part of the view. Until that happens, all the files will be "tmp" files. + 2. All components flushed will have the same "last modified" time, i.e. the time of the first flush. Stats.db can change over time and that is OK. + Since this is not the case, the TTL may end up deleting this file even though the file is part of the next snapshot. To avoid this, we add a grace period (based on how long compaction can run) when we delete the files. + */ dateToTtl = dateToTtl.minus(config.getGracePeriodDaysForCompaction(), ChronoUnit.DAYS); + logger.info( + "Will delete(TTL) SST_V2 files which are before this time: {}. Input: [TTL: {} days, Grace Period: {} days]", + dateToTtl, + config.getBackupRetentionDays(), + config.getGracePeriodDaysForCompaction()); + + while (remoteFileLocations.hasNext()) { + AbstractBackupPath abstractBackupPath = abstractBackupPathProvider.get(); + abstractBackupPath.parseRemote(remoteFileLocations.next()); + // If lastModifiedTime is after the dateToTTL, we should get out of this loop as + // remote file systems always give locations which are sorted. + if (abstractBackupPath.getLastModified().isAfter(dateToTtl)) { + logger.info( + "Breaking from TTL. Got a key which is after the TTL time: {}", + abstractBackupPath.getRemotePath()); + break; + } + + if (!filesInMeta.containsKey(abstractBackupPath.getRemotePath())) { + deleteFile(abstractBackupPath, false); + } else { + if (logger.isDebugEnabled()) + logger.debug( + "Not deleting this key as it is referenced in backups: {}", + abstractBackupPath.getRemotePath()); + } + } + + // Delete remaining files.
+ deleteFile(null, true); + + logger.info("Finished processing files for TTL service"); + } finally { + lock.unlock(); + } + } + + private void deleteFile(AbstractBackupPath path, boolean forceClear) + throws BackupRestoreException { + if (path != null) filesToDelete.add(Paths.get(path.getRemotePath())); + + if (forceClear || filesToDelete.size() >= BATCH_SIZE) { + fileSystem.deleteRemoteFiles(filesToDelete); + filesToDelete.clear(); + } + } + + private String getSSTPrefix() { + Path location = fileSystem.getPrefix(); + AbstractBackupPath abstractBackupPath = abstractBackupPathProvider.get(); + return abstractBackupPath + .remoteV2Prefix(location, AbstractBackupPath.BackupFileType.SST_V2) + .toString(); + } + + @Override + public String getName() { + return JOBNAME; + } + + /** + * Interval between trying to TTL data on Remote file system. + * + * @param backupRestoreConfig {@link IBackupRestoreConfig#getBackupTTLMonitorPeriodInSec()} to + * get configuration details from priam. Use "-1" to disable the service. + * @return the timer to be used for backup ttl service. + * @throws Exception if the configuration is not set correctly or are not valid. This is to + * ensure we fail-fast. + */ + public static TaskTimer getTimer( + IBackupRestoreConfig backupRestoreConfig, Fraction ringPosition) throws Exception { + int period = backupRestoreConfig.getBackupTTLMonitorPeriodInSec(); + Instant start = Instant.ofEpochSecond((long) (period * ringPosition.doubleValue())); + return new SimpleTimer(JOBNAME, period, start); + } + + private class MetaFileWalker extends MetaFileReader { + @Override + public void process(ColumnFamilyResult columnfamilyResult) { + columnfamilyResult + .getSstables() + .forEach( + ssTableResult -> + ssTableResult + .getSstableComponents() + .forEach( + fileUploadResult -> + filesInMeta.put( + fileUploadResult + .getBackupPath(), + null))); + } + } +} diff --git a/priam/src/main/java/com/netflix/priam/backupv2/BackupV2Service.java b/priam/src/main/java/com/netflix/priam/backupv2/BackupV2Service.java new file mode 100644 index 000000000..7889eaea7 --- /dev/null +++ b/priam/src/main/java/com/netflix/priam/backupv2/BackupV2Service.java @@ -0,0 +1,101 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.priam.backupv2; + +import com.google.inject.Inject; +import com.netflix.priam.backup.IncrementalBackup; +import com.netflix.priam.config.IBackupRestoreConfig; +import com.netflix.priam.config.IConfiguration; +import com.netflix.priam.defaultimpl.IService; +import com.netflix.priam.identity.token.ITokenRetriever; +import com.netflix.priam.scheduler.PriamScheduler; +import com.netflix.priam.scheduler.TaskTimer; +import com.netflix.priam.tuner.CassandraTunerService; + +/** + * Encapsulate the backup service 2.0 - Execute all the tasks required to run backup service. + * Created by aagrawal on 3/9/19. 
+ */ +public class BackupV2Service implements IService { + private final PriamScheduler scheduler; + private final IConfiguration configuration; + private final IBackupRestoreConfig backupRestoreConfig; + private final SnapshotMetaTask snapshotMetaTask; + private final CassandraTunerService cassandraTunerService; + private final ITokenRetriever tokenRetriever; + + @Inject + public BackupV2Service( + IConfiguration configuration, + IBackupRestoreConfig backupRestoreConfig, + PriamScheduler scheduler, + SnapshotMetaTask snapshotMetaService, + CassandraTunerService cassandraTunerService, + ITokenRetriever tokenRetriever) { + this.configuration = configuration; + this.backupRestoreConfig = backupRestoreConfig; + this.scheduler = scheduler; + this.snapshotMetaTask = snapshotMetaService; + this.cassandraTunerService = cassandraTunerService; + this.tokenRetriever = tokenRetriever; + } + + @Override + public void scheduleService() throws Exception { + TaskTimer snapshotMetaTimer = SnapshotMetaTask.getTimer(backupRestoreConfig); + if (snapshotMetaTimer == null) { + SnapshotMetaTask.cleanOldBackups(configuration); + } + scheduleTask(scheduler, SnapshotMetaTask.class, snapshotMetaTimer); + + if (snapshotMetaTimer != null) { + // Try to upload previous snapshots, if any which might have been interrupted by Priam + // restart. + snapshotMetaTask.uploadFiles(); + + // Schedule the TTL service + TaskTimer timer = + BackupTTLTask.getTimer(backupRestoreConfig, tokenRetriever.getRingPosition()); + scheduleTask(scheduler, BackupTTLTask.class, timer); + + // Schedule the backup verification service + scheduleTask( + scheduler, + BackupVerificationTask.class, + BackupVerificationTask.getTimer(backupRestoreConfig)); + } else { + scheduler.deleteTask(BackupTTLTask.JOBNAME); + scheduler.deleteTask(BackupVerificationTask.JOBNAME); + } + + // Start the Incremental backup schedule if enabled + scheduleTask( + scheduler, + IncrementalBackup.class, + IncrementalBackup.getTimer(configuration, backupRestoreConfig)); + } + + @Override + public void updateServicePre() throws Exception { + // Update the cassandra to enable/disable new incremental files. + cassandraTunerService.onChangeUpdateService(); + } + + @Override + public void updateServicePost() throws Exception {} +} diff --git a/priam/src/main/java/com/netflix/priam/backupv2/BackupVerificationTask.java b/priam/src/main/java/com/netflix/priam/backupv2/BackupVerificationTask.java new file mode 100644 index 000000000..926b8513c --- /dev/null +++ b/priam/src/main/java/com/netflix/priam/backupv2/BackupVerificationTask.java @@ -0,0 +1,127 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.priam.backupv2; + +import com.google.inject.Inject; +import com.google.inject.Singleton; +import com.netflix.priam.backup.*; +import com.netflix.priam.config.IBackupRestoreConfig; +import com.netflix.priam.config.IConfiguration; +import com.netflix.priam.health.InstanceState; +import com.netflix.priam.merics.BackupMetrics; +import com.netflix.priam.notification.BackupNotificationMgr; +import com.netflix.priam.scheduler.CronTimer; +import com.netflix.priam.scheduler.Task; +import com.netflix.priam.scheduler.TaskTimer; +import com.netflix.priam.utils.DateUtil; +import com.netflix.priam.utils.DateUtil.DateRange; +import java.time.Instant; +import java.time.temporal.ChronoUnit; +import java.util.List; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** Created by aagrawal on 1/28/19. */ +@Singleton +public class BackupVerificationTask extends Task { + private static final Logger logger = LoggerFactory.getLogger(BackupVerificationTask.class); + public static final String JOBNAME = "BackupVerificationService"; + + private IBackupRestoreConfig backupRestoreConfig; + private BackupVerification backupVerification; + private BackupMetrics backupMetrics; + private InstanceState instanceState; + private BackupNotificationMgr backupNotificationMgr; + + @Inject + public BackupVerificationTask( + IConfiguration configuration, + IBackupRestoreConfig backupRestoreConfig, + BackupVerification backupVerification, + BackupMetrics backupMetrics, + InstanceState instanceState, + BackupNotificationMgr backupNotificationMgr) { + super(configuration); + this.backupRestoreConfig = backupRestoreConfig; + this.backupVerification = backupVerification; + this.backupMetrics = backupMetrics; + this.instanceState = instanceState; + this.backupNotificationMgr = backupNotificationMgr; + } + + @Override + public void execute() throws Exception { + // Ensure that backup version 2.0 is actually enabled. + if (backupRestoreConfig.getSnapshotMetaServiceCronExpression().equals("-1")) { + logger.info("Skipping backup verification. V2 backups are not enabled."); + return; + } + + if (instanceState.getRestoreStatus() != null + && instanceState.getRestoreStatus().getStatus() != null + && instanceState.getRestoreStatus().getStatus() == Status.STARTED) { + logger.info("Skipping backup verification. Priam is in restore mode."); + return; + } + + // Validate the backup done in last x hours. + Instant now = DateUtil.getInstant(); + Instant slo = + now.minus(backupRestoreConfig.getBackupVerificationSLOInHours(), ChronoUnit.HOURS); + DateRange dateRange = new DateRange(slo, now); + List verificationResults = + backupVerification.verifyAllBackups(BackupVersion.SNAPSHOT_META_SERVICE, dateRange); + + verificationResults.forEach( + result -> { + logger.info( + "Sending {} message for backup: {}", + AbstractBackupPath.BackupFileType.SNAPSHOT_VERIFIED, + result.snapshotInstant); + backupNotificationMgr.notify(result); + }); + + if (!backupVerification + .verifyBackup(BackupVersion.SNAPSHOT_META_SERVICE, false /* force */, dateRange) + .isPresent()) { + logger.error( + "Not able to find any snapshot which is valid in our SLO window: {} hours", + backupRestoreConfig.getBackupVerificationSLOInHours()); + backupMetrics.incrementBackupVerificationFailure(); + } + } + + /** + * Interval between trying to verify data manifest file on Remote file system. + * + * @param backupRestoreConfig {@link IBackupRestoreConfig#getBackupVerificationCronExpression()} + * to get configuration details from priam. 
Use "-1" to disable the service. + * @return the timer to be used for snapshot verification service. + * @throws Exception if the configuration is not set correctly or are not valid. This is to + * ensure we fail-fast. + */ + public static TaskTimer getTimer(IBackupRestoreConfig backupRestoreConfig) throws Exception { + String cronExpression = backupRestoreConfig.getBackupVerificationCronExpression(); + return CronTimer.getCronTimer(JOBNAME, cronExpression); + } + + @Override + public String getName() { + return JOBNAME; + } +} diff --git a/priam/src/main/java/com/netflix/priam/backupv2/ColumnFamilyResult.java b/priam/src/main/java/com/netflix/priam/backupv2/ColumnFamilyResult.java new file mode 100644 index 000000000..5c273821e --- /dev/null +++ b/priam/src/main/java/com/netflix/priam/backupv2/ColumnFamilyResult.java @@ -0,0 +1,76 @@ +/** + * Copyright 2018 Netflix, Inc. + * + *

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + *

http://www.apache.org/licenses/LICENSE-2.0 + * + *

Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.priam.backupv2; + +import com.google.common.collect.ImmutableSet; +import com.netflix.priam.utils.GsonJsonSerializer; +import java.util.ArrayList; +import java.util.List; +import java.util.Set; + +/** + * This is a POJO to encapsulate all the SSTables for a given column family. Created by aagrawal on + * 7/1/18. + */ +public class ColumnFamilyResult { + private String keyspaceName; + private String columnfamilyName; + private List sstables = new ArrayList<>(); + + public ColumnFamilyResult(String keyspaceName, String columnfamilyName) { + this.keyspaceName = keyspaceName; + this.columnfamilyName = columnfamilyName; + } + + public List getSstables() { + return sstables; + } + + public void setSstables(List sstables) { + this.sstables = sstables; + } + + public void addSstable(SSTableResult sstable) { + if (sstables == null) sstables = new ArrayList<>(); + sstables.add(sstable); + } + + @Override + public String toString() { + return GsonJsonSerializer.getGson().toJson(this); + } + + /** This is a POJO to encapsulate a SSTable and all its components. */ + public static class SSTableResult { + private String prefix; + private Set sstableComponents; + + public void setPrefix(String prefix) { + this.prefix = prefix; + } + + public Set getSstableComponents() { + return sstableComponents; + } + + public void setSstableComponents(ImmutableSet sstableComponents) { + this.sstableComponents = sstableComponents; + } + + @Override + public String toString() { + return GsonJsonSerializer.getGson().toJson(this); + } + } +} diff --git a/priam/src/main/java/com/netflix/priam/backupv2/ColumnfamilyResult.java b/priam/src/main/java/com/netflix/priam/backupv2/ColumnfamilyResult.java deleted file mode 100644 index e97eb17ca..000000000 --- a/priam/src/main/java/com/netflix/priam/backupv2/ColumnfamilyResult.java +++ /dev/null @@ -1,100 +0,0 @@ -/** - * Copyright 2018 Netflix, Inc. - *
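Like the other v2 POJOs in this change, ColumnFamilyResult above delegates toString() to Gson via GsonJsonSerializer. A minimal standalone illustration of that serialization pattern, using plain Gson in place of Priam's wrapper:

import com.google.gson.Gson;

public class GsonToStringDemo {
    static class Point {
        int x = 1;
        int y = 2;

        @Override
        public String toString() {
            // Serialize the object's fields as JSON, e.g. {"x":1,"y":2}
            return new Gson().toJson(this);
        }
    }

    public static void main(String[] args) {
        System.out.println(new Point());
    }
}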

- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.netflix.priam.backupv2; - -import com.netflix.priam.utils.GsonJsonSerializer; - -import java.util.ArrayList; -import java.util.List; - -/** - * This is a POJO to encapsulate all the SSTables for a given column family. - * Created by aagrawal on 7/1/18. - */ -public class ColumnfamilyResult { - private String keyspaceName; - private String columnfamilyName; - private List sstables = new ArrayList<>(); - - public ColumnfamilyResult(String keyspaceName, String columnfamilyName) { - this.keyspaceName = keyspaceName; - this.columnfamilyName = columnfamilyName; - } - - public String getKeyspaceName() { - return keyspaceName; - } - - public void setKeyspaceName(String keyspaceName) { - this.keyspaceName = keyspaceName; - } - - public String getColumnfamilyName() { - return columnfamilyName; - } - - public void setColumnfamilyName(String columnfamilyName) { - this.columnfamilyName = columnfamilyName; - } - - public List getSstables() { - return sstables; - } - - public void setSstables(List sstables) { - this.sstables = sstables; - } - - public void addSstable(SSTableResult sstable) { - if (sstables == null) - sstables = new ArrayList<>(); - sstables.add(sstable); - } - - @Override - public String toString() { - return GsonJsonSerializer.getGson().toJson(this); - } - - /** - * This is a POJO to encapsulate a SSTable and all its components. - */ - public static class SSTableResult { - private String prefix; - private List sstableComponents; - - public String getPrefix() { - return prefix; - } - - public void setPrefix(String prefix) { - this.prefix = prefix; - } - - public List getSstableComponents() { - return sstableComponents; - } - - public void setSstableComponents(List sstableComponents) { - this.sstableComponents = sstableComponents; - } - - @Override - public String toString() { - return GsonJsonSerializer.getGson().toJson(this); - } - } -} diff --git a/priam/src/main/java/com/netflix/priam/backupv2/FileUploadResult.java b/priam/src/main/java/com/netflix/priam/backupv2/FileUploadResult.java index 8d0268bb1..dc01a5a81 100644 --- a/priam/src/main/java/com/netflix/priam/backupv2/FileUploadResult.java +++ b/priam/src/main/java/com/netflix/priam/backupv2/FileUploadResult.java @@ -16,113 +16,79 @@ */ package com.netflix.priam.backupv2; -import com.netflix.priam.compress.ICompression; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.netflix.priam.backup.AbstractBackupPath; +import com.netflix.priam.compress.CompressionType; +import com.netflix.priam.cryptography.CryptographyAlgorithm; import com.netflix.priam.utils.GsonJsonSerializer; -import org.codehaus.jettison.json.JSONObject; - import java.io.File; -import java.nio.file.Files; import java.nio.file.Path; -import java.nio.file.attribute.BasicFileAttributes; import java.time.Instant; /** - * This is a POJO that will encapsulate the result of file upload. - * Created by aagrawal on 6/20/18. + * This is a POJO that will encapsulate the result of file upload. Created by aagrawal on 6/20/18. 
*/ public class FileUploadResult { - private Path fileName; - @GsonJsonSerializer.PriamAnnotation.GsonIgnore - private String keyspaceName; - @GsonJsonSerializer.PriamAnnotation.GsonIgnore - private String columnFamilyName; - private Instant lastModifiedTime; - private Instant fileCreationTime; - private long fileSizeOnDisk; //Size on disk in bytes - private Boolean isUploaded; - //Valid compression technique for now is SNAPPY only. Future we need to support LZ4 and NONE - private ICompression.CompressionAlgorithm compression = ICompression.CompressionAlgorithm.SNAPPY; - private Path backupPath; + private final Path fileName; + private final Instant lastModifiedTime; + private final Instant fileCreationTime; + private final long fileSizeOnDisk; // Size on disk in bytes + // Valid compression technique for now is SNAPPY only. Future we need to support LZ4 and NONE + private final CompressionType compression; + // Valid encryption technique for now is PLAINTEXT only. In future we will support pgp and more. + private final CryptographyAlgorithm encryption; - public FileUploadResult(Path fileName, String keyspaceName, String columnFamilyName, Instant lastModifiedTime, Instant fileCreationTime, long fileSizeOnDisk) { + private Boolean isUploaded; + private String backupPath; + + @VisibleForTesting + public FileUploadResult( + Path fileName, + Instant lastModifiedTime, + Instant fileCreationTime, + long fileSizeOnDisk) { this.fileName = fileName; - this.keyspaceName = keyspaceName; - this.columnFamilyName = columnFamilyName; this.lastModifiedTime = lastModifiedTime; this.fileCreationTime = fileCreationTime; this.fileSizeOnDisk = fileSizeOnDisk; + this.compression = CompressionType.SNAPPY; + this.encryption = CryptographyAlgorithm.PLAINTEXT; } - public static FileUploadResult getFileUploadResult(String keyspaceName, String columnFamilyName, Path file) throws Exception { - BasicFileAttributes fileAttributes = Files.readAttributes(file, BasicFileAttributes.class); - return new FileUploadResult(file, keyspaceName, columnFamilyName, fileAttributes.lastModifiedTime().toInstant(), fileAttributes.creationTime().toInstant(), fileAttributes.size()); - } - - public static FileUploadResult getFileUploadResult(String keyspaceName, String columnFamilyName, File file) throws Exception { - return getFileUploadResult(keyspaceName, columnFamilyName, file.toPath()); - } - - public Path getFileName() { - return fileName; - } - - public String getKeyspaceName() { - return keyspaceName; - } - - public String getColumnFamilyName() { - return columnFamilyName; - } - - public Instant getLastModifiedTime() { - return lastModifiedTime; - } - - public Instant getFileCreationTime() { - return fileCreationTime; - } - - public long getFileSizeOnDisk() { - return fileSizeOnDisk; - } - - public Boolean getUploaded() { - return isUploaded; + public FileUploadResult(AbstractBackupPath path) { + Preconditions.checkArgument(path.getLastModified().toEpochMilli() > 0); + Preconditions.checkArgument(path.getCreationTime().toEpochMilli() > 0); + File file = path.getBackupFile(); + this.fileName = file.toPath(); + this.backupPath = path.getRemotePath(); + this.lastModifiedTime = path.getLastModified(); + this.fileCreationTime = path.getCreationTime(); + this.fileSizeOnDisk = path.getSize(); + this.compression = path.getCompression(); + this.encryption = path.getEncryption(); } public void setUploaded(Boolean uploaded) { isUploaded = uploaded; } - public ICompression.CompressionAlgorithm getCompression() { - return compression; + public 
Boolean getIsUploaded() { + return isUploaded; } - public void setCompression(ICompression.CompressionAlgorithm compression) { - this.compression = compression; + public Path getFileName() { + return fileName; } - public Path getBackupPath() { + public String getBackupPath() { return backupPath; } - public void setBackupPath(Path backupPath) { + public void setBackupPath(String backupPath) { this.backupPath = backupPath; } - // - public JSONObject getJSONObject() throws Exception { - JSONObject result = new JSONObject(); - result.put("file", fileName.toFile().getName()); - result.put("modify", lastModifiedTime.toEpochMilli()); - result.put("creation", fileCreationTime.toEpochMilli()); - result.put("size", fileSizeOnDisk); - result.put("compression", compression.name()); - result.put("uploaded", isUploaded); - result.put("loc", backupPath); - return result; - } - @Override public String toString() { return GsonJsonSerializer.getGson().toJson(this); diff --git a/priam/src/main/java/com/netflix/priam/backupv2/ForgottenFilesManager.java b/priam/src/main/java/com/netflix/priam/backupv2/ForgottenFilesManager.java new file mode 100644 index 000000000..d1ea10760 --- /dev/null +++ b/priam/src/main/java/com/netflix/priam/backupv2/ForgottenFilesManager.java @@ -0,0 +1,212 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.priam.backupv2; + +import com.google.inject.Inject; +import com.netflix.priam.config.IConfiguration; +import com.netflix.priam.merics.BackupMetrics; +import com.netflix.priam.utils.DateUtil; +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.LinkOption; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.time.Instant; +import java.time.temporal.ChronoUnit; +import java.util.Collection; +import java.util.regex.Pattern; +import java.util.stream.Collectors; +import org.apache.commons.io.FileUtils; +import org.apache.commons.io.filefilter.FileFilterUtils; +import org.apache.commons.io.filefilter.IOFileFilter; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** Created by aagrawal on 1/1/19. 
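+ *
+ * <p>In short (a summary of the implementation that follows): after a snapshot is taken, any
+ * SSTable in the columnfamily directory that is older than the compaction grace period but
+ * absent from the snapshot is treated as a potential forgotten file. A symbolic link to each
+ * candidate is first created under the lost+found directory; only when that link has survived
+ * past the read grace period (and moves are enabled) is the file itself moved to lost+found.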
*/ +public class ForgottenFilesManager { + private static final Logger logger = LoggerFactory.getLogger(ForgottenFilesManager.class); + + private BackupMetrics backupMetrics; + private IConfiguration config; + private static final String TMP_EXT = ".tmp"; + + private static final Pattern tmpFilePattern = + Pattern.compile("^((.*)\\-(.*)\\-)?tmp(link)?\\-((?:l|k).)\\-(\\d)*\\-(.*)$"); + + protected static final String LOST_FOUND = "lost+found"; + + @Inject + public ForgottenFilesManager(IConfiguration configuration, BackupMetrics backupMetrics) { + this.config = configuration; + this.backupMetrics = backupMetrics; + } + + public void findAndMoveForgottenFiles(Instant snapshotInstant, File snapshotDir) { + try { + Collection snapshotFiles = + FileUtils.listFiles(snapshotDir, FileFilterUtils.fileFileFilter(), null); + File columnfamilyDir = snapshotDir.getParentFile().getParentFile(); + Collection columnfamilyFiles = + getColumnfamilyFiles(snapshotInstant, columnfamilyDir); + + // Remove the SSTable(s) which are part of the snapshot from the CF file list. + // This cannot be a simple removeAll as snapshot files have a "different" file folder + // prefix. + for (File file : snapshotFiles) { + // Resolve the corresponding file in the columnfamily directory. + File originalFile = new File(columnfamilyDir, file.getName()); + columnfamilyFiles.remove(originalFile); + } + + // If there are no "extra" SSTables in the CF data folder, we are done. + if (columnfamilyFiles.size() == 0) return; + + logger.warn( + "# of potential forgotten files: {} found for CF: {}", + columnfamilyFiles.size(), + columnfamilyDir.getName()); + + // Move the files to the lost+found directory if configured. + moveForgottenFiles(columnfamilyDir, columnfamilyFiles); + + } catch (Exception e) { + // Swallow any exception here; finding forgotten files should not stop the snapshot for + // any reason. + logger.error( + "Exception occurred while trying to find forgotten files. Ignoring the error and continuing with remaining backup", + e); + e.printStackTrace(); + } + } + + protected Collection getColumnfamilyFiles(Instant snapshotInstant, File columnfamilyDir) { + // Find all the files in the columnfamily folder which are: + // 1. Not a temp file. + // 2. A file. (we don't care about directories) + // 3. Older than the snapshot time, as new files keep getting created after taking a + // snapshot. + IOFileFilter tmpFileFilter1 = FileFilterUtils.suffixFileFilter(TMP_EXT); + IOFileFilter tmpFileFilter2 = + FileFilterUtils.asFileFilter( + pathname -> tmpFilePattern.matcher(pathname.getName()).matches()); + IOFileFilter tmpFileFilter = FileFilterUtils.or(tmpFileFilter1, tmpFileFilter2); + /* + Here we are only allowing files which are older than + @link{IConfiguration#getGracePeriodDaysForCompaction} days. We do this to allow Cassandra + to keep files which were generated as part of a long running compaction. + Refer to https://issues.apache.org/jira/browse/CASSANDRA-6756 and + https://issues.apache.org/jira/browse/CASSANDRA-7066 + for more information. 
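As an illustration (numbers hypothetical): with a grace period of 5 days, an SSTable modified
within the 5 days before the snapshot is excluded from the forgotten-file candidates, since it
may belong to a compaction that is still in flight; anything older is a candidate.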
+ */ + IOFileFilter ageFilter = + FileFilterUtils.ageFileFilter( + snapshotInstant + .minus(config.getGracePeriodDaysForCompaction(), ChronoUnit.DAYS) + .toEpochMilli()); + IOFileFilter fileFilter = + FileFilterUtils.and( + FileFilterUtils.notFileFilter(tmpFileFilter), + FileFilterUtils.fileFileFilter(), + ageFilter); + + return FileUtils.listFiles(columnfamilyDir, fileFilter, null); + } + + protected void moveForgottenFiles(File columnfamilyDir, Collection columnfamilyFiles) + throws IOException { + // This is a list of potential forgotten file(s). Note that C* might still be using these + // files as part of reads, so we really do not want to move them until the + // @link{IConfiguration#getForgottenFileGracePeriodDaysForRead} window elapses. + + final Path destDir = Paths.get(columnfamilyDir.getAbsolutePath(), LOST_FOUND); + FileUtils.forceMkdir(destDir.toFile()); + final Collection columnfamilyPaths = + columnfamilyFiles + .parallelStream() + .map(file -> Paths.get(file.getAbsolutePath())) + .collect(Collectors.toList()); + + for (Path file : columnfamilyPaths) { + try { + final Path symbolic_link = + Paths.get(destDir.toFile().getAbsolutePath(), file.toFile().getName()); + // Let's see if there is already a symbolic link to this file. + if (!Files.exists(symbolic_link)) { + // If not, let's create one and move on to the next file. + Files.createSymbolicLink(symbolic_link, file); + continue; + } else if (Files.isSymbolicLink(symbolic_link)) { + // The symbolic link exists; is it older than our timeframe? + Instant last_modified_time = + Files.getLastModifiedTime(symbolic_link, LinkOption.NOFOLLOW_LINKS) + .toInstant(); + if (DateUtil.getInstant() + .isAfter( + last_modified_time.plus( + config.getForgottenFileGracePeriodDaysForRead(), + ChronoUnit.DAYS))) { + // Eligible for move. + logger.info( + "Eligible for move: Forgotten file: {} found for CF: {}", + file, + columnfamilyDir.getName()); + backupMetrics.incrementForgottenFiles(1); + if (config.isForgottenFileMoveEnabled()) { + try { + // Remove our symbolic link. Note that deleting a symbolic link + // does not remove the original file. + Files.delete(symbolic_link); + FileUtils.moveFileToDirectory( + file.toFile(), destDir.toFile(), true); + logger.warn( + "Successfully moved forgotten file: {} found for CF: {}", + file, + columnfamilyDir.getName()); + } catch (IOException e) { + logger.error( + "Exception occurred while trying to move forgotten file: {}. Ignoring the error and continuing with the remaining backup/forgotten files.", + file); + e.printStackTrace(); + } + } + } + } + + } catch (IOException e) { + logger.error("Forgotten file: Error while trying to process the file: {}", file); + e.printStackTrace(); + } + } + + // Clean the LOST_FOUND directory of any previous symbolic links which are no longer + // considered lost. + for (File file : FileUtils.listFiles(destDir.toFile(), null, false)) { + Path filePath = Paths.get(file.getAbsolutePath()); + if (Files.isSymbolicLink(filePath)) { + Path originalFile = Files.readSymbolicLink(filePath); + if (!columnfamilyPaths.contains(originalFile)) { + Files.delete(filePath); + logger.info( + "Deleting the symbolic link as it is no longer considered lost. 
filePath: {}", + filePath); + } + } + } + } +} diff --git a/priam/src/main/java/com/netflix/priam/backupv2/IMetaProxy.java b/priam/src/main/java/com/netflix/priam/backupv2/IMetaProxy.java new file mode 100644 index 000000000..99c52abb2 --- /dev/null +++ b/priam/src/main/java/com/netflix/priam/backupv2/IMetaProxy.java @@ -0,0 +1,97 @@ +/* + * Copyright 2018 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.priam.backupv2; + +import com.netflix.priam.backup.AbstractBackupPath; +import com.netflix.priam.backup.BackupRestoreException; +import com.netflix.priam.backup.BackupVerificationResult; +import com.netflix.priam.utils.DateUtil; +import java.nio.file.Path; +import java.util.Iterator; +import java.util.List; + +/** Proxy to do management tasks for meta files. Created by aagrawal on 12/18/18. */ +public interface IMetaProxy { + + /** + * Path on the local file system where the meta file should be stored for processing. + * + * @return location on the local file system. + */ + Path getLocalMetaFileDirectory(); + + /** + * Get the prefix for the manifest file. This will depend on the configuration, e.g. whether a + * restore prefix is set. + * + * @param dateRange date range for which we are trying to find manifest files. + * @return prefix for the manifest files. + */ + String getMetaPrefix(DateUtil.DateRange dateRange); + + /** + * Fetch the list of all manifest files on the remote file system for the provided valid + * daterange. + * + * @param dateRange the time period to scan in the remote file system for meta files. + * @return List of all the manifest files from the remote file system. + */ + List findMetaFiles(DateUtil.DateRange dateRange); + + /** + * Download the meta file to disk. + * + * @param meta AbstractBackupPath denoting the meta file on the remote file system. + * @return the location of the meta file on disk after downloading from the remote file system. + * @throws BackupRestoreException if unable to download for any reason. + */ + Path downloadMetaFile(AbstractBackupPath meta) throws BackupRestoreException; + + /** + * Read the manifest file and give the contents of the file (all the sstable components) as a + * list. + * + * @param localMetaPath location of the manifest file on disk. + * @return list containing all the remote locations of sstable components. + * @throws Exception if the file is not found on the local system or is corrupt. + */ + List getSSTFilesFromMeta(Path localMetaPath) throws Exception; + + /** + * Get the list of incremental files given the daterange. + * + * @param dateRange the time period to scan in the remote file system for incremental files. + * @return iterator over the paths on the remote file system satisfying the criteria. + * @throws BackupRestoreException if there is an issue contacting the remote file system. + */ + Iterator getIncrementals(DateUtil.DateRange dateRange) + throws BackupRestoreException; + + /** + * Validate that all the files mentioned in the meta file actually exist on the remote file system. 
+ * + * @param metaBackupPath Path to the remote meta file. + * @return backupVerificationResult containing information such as "valid" - true if all the files + * mentioned in the meta file are present on the remote file system, false in case of + * any error. + */ + BackupVerificationResult isMetaFileValid(AbstractBackupPath metaBackupPath); + + /** Delete the old meta files, if any are present in the metaFileDirectory */ + void cleanupOldMetaFiles(); +} diff --git a/priam/src/main/java/com/netflix/priam/backupv2/MetaFileInfo.java b/priam/src/main/java/com/netflix/priam/backupv2/MetaFileInfo.java index 72d76c23d..8a734811a 100644 --- a/priam/src/main/java/com/netflix/priam/backupv2/MetaFileInfo.java +++ b/priam/src/main/java/com/netflix/priam/backupv2/MetaFileInfo.java @@ -18,20 +18,20 @@ import com.netflix.priam.utils.DateUtil; import com.netflix.priam.utils.GsonJsonSerializer; - import java.time.Instant; import java.util.List; -/** - * This POJO class encapsulates the information for a meta file. - */ +/** This POJO class encapsulates the information for a meta file. */ public class MetaFileInfo { @GsonJsonSerializer.PriamAnnotation.GsonIgnore public static final String META_FILE_PREFIX = "meta_v2_"; + @GsonJsonSerializer.PriamAnnotation.GsonIgnore public static final String META_FILE_SUFFIX = ".json"; + @GsonJsonSerializer.PriamAnnotation.GsonIgnore public static final String META_FILE_INFO = "info"; + @GsonJsonSerializer.PriamAnnotation.GsonIgnore public static final String META_FILE_DATA = "data"; @@ -95,6 +95,8 @@ public String toString() { } public static String getMetaFileName(Instant instant) { - return MetaFileInfo.META_FILE_PREFIX + DateUtil.formatInstant(DateUtil.yyyyMMddHHmm, instant) + MetaFileInfo.META_FILE_SUFFIX; + return MetaFileInfo.META_FILE_PREFIX + + DateUtil.formatInstant(DateUtil.yyyyMMddHHmm, instant) + + MetaFileInfo.META_FILE_SUFFIX; } } diff --git a/priam/src/main/java/com/netflix/priam/backupv2/MetaFileManager.java b/priam/src/main/java/com/netflix/priam/backupv2/MetaFileManager.java deleted file mode 100644 index 85b8a17fa..000000000 --- a/priam/src/main/java/com/netflix/priam/backupv2/MetaFileManager.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright 2018 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.priam.backupv2; - -import com.netflix.priam.config.IConfiguration; -import org.apache.commons.io.FileUtils; -import org.apache.commons.io.filefilter.FileFilterUtils; -import org.apache.commons.io.filefilter.IOFileFilter; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.inject.Inject; -import java.io.File; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.Collection; - -/** - * Do any management task for meta files. - * Created by aagrawal on 8/2/18. 
- */ -public class MetaFileManager { - private static final Logger logger = LoggerFactory.getLogger(MetaFileManager.class); - private final Path metaFileDirectory; - - @Inject - MetaFileManager(IConfiguration configuration){ - metaFileDirectory = Paths.get(configuration.getDataFileLocation()); - } - - public Path getMetaFileDirectory(){ - return metaFileDirectory; - } - - /** - * Delete the old meta files, if any present in the metaFileDirectory - */ - public void cleanupOldMetaFiles() { - logger.info("Deleting any old META_V2 files if any"); - IOFileFilter fileNameFilter = FileFilterUtils.and(FileFilterUtils.prefixFileFilter(MetaFileInfo.META_FILE_PREFIX), - FileFilterUtils.or(FileFilterUtils.suffixFileFilter(MetaFileInfo.META_FILE_SUFFIX), - FileFilterUtils.suffixFileFilter(MetaFileInfo.META_FILE_SUFFIX + ".tmp"))); - Collection files = FileUtils.listFiles(metaFileDirectory.toFile(), fileNameFilter, null); - files.stream().filter(file -> file.isFile()).forEach(file -> { - logger.debug("Deleting old META_V2 file found: {}", file.getAbsolutePath()); - file.delete(); - }); - } - -} diff --git a/priam/src/main/java/com/netflix/priam/backupv2/MetaFileReader.java b/priam/src/main/java/com/netflix/priam/backupv2/MetaFileReader.java index a8df6c6ab..701bf21d0 100644 --- a/priam/src/main/java/com/netflix/priam/backupv2/MetaFileReader.java +++ b/priam/src/main/java/com/netflix/priam/backupv2/MetaFileReader.java @@ -19,22 +19,20 @@ import com.google.gson.stream.JsonReader; import com.netflix.priam.utils.DateUtil; import com.netflix.priam.utils.GsonJsonSerializer; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.io.FileNotFoundException; import java.io.FileReader; import java.io.IOException; import java.nio.file.Path; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * This abstract class encapsulates the reading of meta file in streaming fashion. This is required as we could have a meta file which cannot fit in memory. - * Created by aagrawal on 7/3/18. + * This abstract class encapsulates the reading of the meta file in streaming fashion. This is + * required as we could have a meta file which cannot fit in memory. Created by aagrawal on 7/3/18. */ public abstract class MetaFileReader { private static final Logger logger = LoggerFactory.getLogger(MetaFileReader.class); - private JsonReader jsonReader; private MetaFileInfo metaFileInfo; public MetaFileInfo getMetaFileInfo() { @@ -45,27 +43,34 @@ public MetaFileInfo getMetaFileInfo() { * Reads the local meta file as denoted by metaFilePath. * * @param metaFilePath local file path for the meta file. - * @throws IOException + * @throws IOException if permissions are insufficient or the file is not in a valid format. */ public void readMeta(Path metaFilePath) throws IOException { - //Validate if meta file exists and is right file name. - if (metaFilePath == null || !metaFilePath.toFile().exists() || !metaFilePath.toFile().isFile() || !isValidMetaFile(metaFilePath)) { - throw new FileNotFoundException("MetaFilePath: " + metaFilePath + " do not exist or is not valid meta file."); + // Validate that the meta file exists and has the right file name. + if (metaFilePath == null + || !metaFilePath.toFile().exists() + || !metaFilePath.toFile().isFile() + || !isValidMetaFile(metaFilePath)) { + throw new FileNotFoundException( + "MetaFilePath: " + metaFilePath + " does not exist or is not a valid meta file."); } - //Read the meta file. + // Read the meta file. 
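+ // Expected layout of the streamed document (sketch), keyed by the MetaFileInfo constants
+ // ("info" and "data") handled in the switch below:
+ //   { "info": { ...MetaFileInfo fields... },
+ //     "data": [ { ...ColumnFamilyResult... }, ... ] }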
logger.info("Trying to read the meta file: {}", metaFilePath); - jsonReader = new JsonReader(new FileReader(metaFilePath.toFile())); + JsonReader jsonReader = new JsonReader(new FileReader(metaFilePath.toFile())); jsonReader.beginObject(); while (jsonReader.hasNext()) { switch (jsonReader.nextName()) { case MetaFileInfo.META_FILE_INFO: - metaFileInfo = GsonJsonSerializer.getGson().fromJson(jsonReader, MetaFileInfo.class); + metaFileInfo = + GsonJsonSerializer.getGson().fromJson(jsonReader, MetaFileInfo.class); break; case MetaFileInfo.META_FILE_DATA: jsonReader.beginArray(); while (jsonReader.hasNext()) - process(GsonJsonSerializer.getGson().fromJson(jsonReader, ColumnfamilyResult.class)); + process( + GsonJsonSerializer.getGson() + .fromJson(jsonReader, ColumnFamilyResult.class)); jsonReader.endArray(); } } @@ -74,13 +79,13 @@ public void readMeta(Path metaFilePath) throws IOException { logger.info("Finished reading the meta file: {}", metaFilePath); } - /** * Process the columnfamily result obtained after reading meta file. * - * @param columnfamilyResult {@link ColumnfamilyResult} POJO containing the column family data (all SSTables references) obtained from meta.json. + * @param columnfamilyResult {@link ColumnFamilyResult} POJO containing the column family data + * (all SSTable references) obtained from meta.json. */ - public abstract void process(ColumnfamilyResult columnfamilyResult); + public abstract void process(ColumnFamilyResult columnfamilyResult); /** * Returns if it is a valid meta file name. * @@ -90,9 +95,13 @@ public void readMeta(Path metaFilePath) throws IOException { */ public boolean isValidMetaFile(Path metaFilePath) { String fileName = metaFilePath.toFile().getName(); - if (fileName.startsWith(MetaFileInfo.META_FILE_PREFIX) && fileName.endsWith(MetaFileInfo.META_FILE_SUFFIX)) { - //is valid date? - String dateString = fileName.substring(MetaFileInfo.META_FILE_PREFIX.length(), fileName.length() - MetaFileInfo.META_FILE_SUFFIX.length()); + if (fileName.startsWith(MetaFileInfo.META_FILE_PREFIX) + && fileName.endsWith(MetaFileInfo.META_FILE_SUFFIX)) { + // Is the date valid? 
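+ // For example (illustrative name): meta_v2_201801010000.json yields the candidate date
+ // string "201801010000", which must parse with DateUtil.yyyyMMddHHmm.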
+ String dateString = + fileName.substring( + MetaFileInfo.META_FILE_PREFIX.length(), + fileName.length() - MetaFileInfo.META_FILE_SUFFIX.length()); DateUtil.parseInstant(dateString); return true; } diff --git a/priam/src/main/java/com/netflix/priam/backupv2/MetaFileWriterBuilder.java b/priam/src/main/java/com/netflix/priam/backupv2/MetaFileWriterBuilder.java index ffccdeec1..cc7e77f2e 100644 --- a/priam/src/main/java/com/netflix/priam/backupv2/MetaFileWriterBuilder.java +++ b/priam/src/main/java/com/netflix/priam/backupv2/MetaFileWriterBuilder.java @@ -16,19 +16,16 @@ */ package com.netflix.priam.backupv2; +import com.google.common.collect.ImmutableCollection; +import com.google.common.collect.ImmutableMultimap; +import com.google.common.collect.ImmutableSet; import com.google.gson.stream.JsonWriter; import com.google.inject.Provider; -import com.netflix.priam.config.IConfiguration; import com.netflix.priam.backup.AbstractBackupPath; import com.netflix.priam.backup.IBackupFileSystem; import com.netflix.priam.backup.IFileSystemContext; +import com.netflix.priam.config.IConfiguration; import com.netflix.priam.identity.InstanceIdentity; -import com.netflix.priam.utils.RetryableCallable; -import org.apache.commons.io.FileUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.inject.Inject; import java.io.FileWriter; import java.io.IOException; import java.nio.file.Path; @@ -36,17 +33,22 @@ import java.time.Instant; import java.util.ArrayList; import java.util.List; +import java.util.stream.Collectors; +import javax.inject.Inject; +import javax.inject.Named; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * This class will help in generation of meta.json files. This will encapsulate all the SSTables that were there - * on the file system. This will write the meta.json file as a JSON blob. - * NOTE: We want to ensure that it is done via streaming JSON write to ensure we do not consume memory to load all - * these objects in memory. With multi-tenant clusters or LCS enabled on large number of CF's it is easy to have 1000's - * of SSTables (thus 1000's of SSTable components) across CF's. + * This class will help in generation of meta.json files. This will encapsulate all the SSTables + * that were there on the file system. This will write the meta.json file as a JSON blob. NOTE: We + * want to ensure that it is done via streaming JSON write to ensure we do not consume memory to + * load all these objects in memory. With multi-tenant clusters or LCS enabled on a large number of + * CFs it is easy to have 1000s of SSTables (thus 1000s of SSTable components) across CFs. * Created by aagrawal on 6/12/18. 
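+ * <p>A typical flow through the staged interfaces, as exercised by SnapshotMetaTask (a sketch
+ * only; error handling elided):
+ *
+ * <pre>{@code
+ * MetaFileWriterBuilder.DataStep dataStep =
+ *         metaFileWriter.newBuilder().startMetaFileGeneration(snapshotInstant);
+ * dataStep.addColumnfamilyResult(keyspace, columnFamily, sstables); // once per CF
+ * MetaFileWriterBuilder.UploadStep uploadStep = dataStep.endMetaFileGeneration();
+ * uploadStep.uploadMetaFile();
+ * }</pre>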
*/ public class MetaFileWriterBuilder { - private MetaFileWriter metaFileWriter; + private final MetaFileWriter metaFileWriter; private static final Logger logger = LoggerFactory.getLogger(MetaFileWriterBuilder.class); @Inject @@ -63,44 +65,65 @@ public interface StartStep { } public interface DataStep { - DataStep addColumnfamilyResult(ColumnfamilyResult columnfamilyResult) throws IOException; + ColumnFamilyResult addColumnfamilyResult( + String keyspace, + String columnFamily, + ImmutableMultimap sstables) + throws IOException; + UploadStep endMetaFileGeneration() throws IOException; } public interface UploadStep { - void uploadMetaFile(boolean deleteOnSuccess) throws Exception; + void uploadMetaFile() throws Exception; + Path getMetaFilePath(); + + String getRemoteMetaFilePath() throws Exception; } public static class MetaFileWriter implements StartStep, DataStep, UploadStep { private final Provider pathFactory; private final IBackupFileSystem backupFileSystem; - private MetaFileInfo metaFileInfo; - private MetaFileManager metaFileManager; + private final MetaFileInfo metaFileInfo; + private final IMetaProxy metaProxy; private JsonWriter jsonWriter; + private Instant snapshotInstant; private Path metaFilePath; @Inject - private MetaFileWriter(IConfiguration configuration, InstanceIdentity instanceIdentity, Provider pathFactory, IFileSystemContext backupFileSystemCtx, MetaFileManager metaFileManager) { + private MetaFileWriter( + IConfiguration configuration, + InstanceIdentity instanceIdentity, + Provider pathFactory, + IFileSystemContext backupFileSystemCtx, + @Named("v2") IMetaProxy metaProxy) { this.pathFactory = pathFactory; this.backupFileSystem = backupFileSystemCtx.getFileStrategy(configuration); - this.metaFileManager = metaFileManager; + this.metaProxy = metaProxy; List backupIdentifier = new ArrayList<>(); backupIdentifier.add(instanceIdentity.getInstance().getToken()); - metaFileInfo = new MetaFileInfo(configuration.getAppName(), configuration.getDC(), configuration.getRac(), backupIdentifier); + metaFileInfo = + new MetaFileInfo( + configuration.getAppName(), + instanceIdentity.getInstanceInfo().getRegion(), + instanceIdentity.getInstanceInfo().getRac(), + backupIdentifier); } /** * Start the generation of meta file. * - * @throws IOException + * @throws IOException if unable to write to meta file (permissions, disk full etc) */ public DataStep startMetaFileGeneration(Instant snapshotInstant) throws IOException { - //Compute meta file name. + // Compute meta file name. + this.snapshotInstant = snapshotInstant; String fileName = MetaFileInfo.getMetaFileName(snapshotInstant); - metaFilePath = Paths.get(metaFileManager.getMetaFileDirectory().toString(), fileName); - Path tempMetaFilePath = Paths.get(metaFileManager.getMetaFileDirectory().toString(), fileName + ".tmp"); + metaFilePath = Paths.get(metaProxy.getLocalMetaFileDirectory().toString(), fileName); + Path tempMetaFilePath = + Paths.get(metaProxy.getLocalMetaFileDirectory().toString(), fileName + ".tmp"); logger.info("Starting to write a new meta file: {}", metaFilePath); @@ -114,38 +137,52 @@ public DataStep startMetaFileGeneration(Instant snapshotInstant) throws IOExcept } /** - * Add {@link ColumnfamilyResult} after it has been processed so it can be streamed to meta.json. Streaming write to meta.json is required so we don't get Priam OOM. + * Add {@link ColumnFamilyResult} after it has been processed so it can be streamed to + * meta.json. Streaming write to meta.json is required so we don't get Priam OOM. 
* - * @param columnfamilyResult a POJO encapsulating the column family result - * @throws IOException + * @throws IOException if unable to write to the file or if JSON is not valid */ - public MetaFileWriterBuilder.DataStep addColumnfamilyResult(ColumnfamilyResult columnfamilyResult) throws IOException { + public ColumnFamilyResult addColumnfamilyResult( + String keyspace, + String columnFamily, + ImmutableMultimap sstables) + throws IOException { + if (jsonWriter == null) - throw new NullPointerException("addColumnfamilyResult: Json Writer in MetaFileWriter is null. This should not happen!"); - if (columnfamilyResult == null) - throw new NullPointerException("Column family result is null in MetaFileWriter. This should not happen!"); - jsonWriter.jsonValue(columnfamilyResult.toString()); - return this; + throw new NullPointerException( + "addColumnfamilyResult: Json Writer in MetaFileWriter is null. This should not happen!"); + ColumnFamilyResult result = toColumnFamilyResult(keyspace, columnFamily, sstables); + jsonWriter.jsonValue(result.toString()); + return result; } /** * Finish the generation of meta.json file and save it on local media. * * @return {@link Path} to the local meta.json produced. - * @throws IOException + * @throws IOException if unable to write to file or if JSON is not valid */ public MetaFileWriterBuilder.UploadStep endMetaFileGeneration() throws IOException { if (jsonWriter == null) - throw new NullPointerException("endMetaFileGeneration: Json Writer in MetaFileWriter is null. This should not happen!"); + throw new NullPointerException( + "endMetaFileGeneration: Json Writer in MetaFileWriter is null. This should not happen!"); jsonWriter.endArray(); jsonWriter.endObject(); jsonWriter.close(); - Path tempMetaFilePath = Paths.get(metaFileManager.getMetaFileDirectory().toString(), metaFilePath.toFile().getName() + ".tmp"); + Path tempMetaFilePath = + Paths.get( + metaProxy.getLocalMetaFileDirectory().toString(), + metaFilePath.toFile().getName() + ".tmp"); - //Rename the tmp file. + // Rename the tmp file. tempMetaFilePath.toFile().renameTo(metaFilePath.toFile()); + + // Set the last modified time to snapshot time as generating manifest file may take some + // time. + metaFilePath.toFile().setLastModified(snapshotInstant.toEpochMilli()); + logger.info("Finished writing to meta file: {}", metaFilePath); return this; @@ -154,27 +191,59 @@ public MetaFileWriterBuilder.UploadStep endMetaFileGeneration() throws IOExcepti /** * Upload the meta file generated to backup file system. * - * @param deleteOnSuccess delete the meta file from local file system if backup is successful. Useful for testing purposes * @throws Exception when unable to upload the meta file. 
*/ - public void uploadMetaFile(boolean deleteOnSuccess) throws Exception { + public void uploadMetaFile() throws Exception { AbstractBackupPath abstractBackupPath = pathFactory.get(); - abstractBackupPath.parseLocal(metaFilePath.toFile(), AbstractBackupPath.BackupFileType.META_V2); - new RetryableCallable(6, 5000) { - @Override - public Void retriableCall() throws Exception { - backupFileSystem.upload(abstractBackupPath, abstractBackupPath.localReader()); - abstractBackupPath.setCompressedFileSize(backupFileSystem.getBytesUploaded()); - return null; - } - }.call(); - - if (deleteOnSuccess) - FileUtils.deleteQuietly(metaFilePath.toFile()); + abstractBackupPath.parseLocal( + metaFilePath.toFile(), AbstractBackupPath.BackupFileType.META_V2); + backupFileSystem.uploadAndDelete(abstractBackupPath, false /* async */); } - public Path getMetaFilePath(){ + public Path getMetaFilePath() { return metaFilePath; } + + public String getRemoteMetaFilePath() throws Exception { + AbstractBackupPath abstractBackupPath = pathFactory.get(); + abstractBackupPath.parseLocal( + metaFilePath.toFile(), AbstractBackupPath.BackupFileType.META_V2); + return abstractBackupPath.getRemotePath(); + } + + private ColumnFamilyResult toColumnFamilyResult( + String keyspace, + String columnFamily, + ImmutableMultimap sstables) { + ColumnFamilyResult columnfamilyResult = new ColumnFamilyResult(keyspace, columnFamily); + sstables.keySet() + .stream() + .map(k -> toSSTableResult(k, sstables.get(k))) + .forEach(columnfamilyResult::addSstable); + return columnfamilyResult; + } + + private ColumnFamilyResult.SSTableResult toSSTableResult( + String prefix, ImmutableCollection sstable) { + ColumnFamilyResult.SSTableResult ssTableResult = new ColumnFamilyResult.SSTableResult(); + ssTableResult.setPrefix(prefix); + ssTableResult.setSstableComponents( + ImmutableSet.copyOf( + sstable.stream() + .map(this::toFileUploadResult) + .collect(Collectors.toSet()))); + return ssTableResult; + } + + private FileUploadResult toFileUploadResult(AbstractBackupPath path) { + FileUploadResult fileUploadResult = new FileUploadResult(path); + try { + Path backupPath = Paths.get(fileUploadResult.getBackupPath()); + fileUploadResult.setUploaded(backupFileSystem.checkObjectExists(backupPath)); + } catch (Exception e) { + logger.error("Error checking if file exists. Ignoring as it is not fatal.", e); + } + return fileUploadResult; + } } } diff --git a/priam/src/main/java/com/netflix/priam/backupv2/MetaV1Proxy.java b/priam/src/main/java/com/netflix/priam/backupv2/MetaV1Proxy.java new file mode 100644 index 000000000..3569b7345 --- /dev/null +++ b/priam/src/main/java/com/netflix/priam/backupv2/MetaV1Proxy.java @@ -0,0 +1,188 @@ +/* + * Copyright 2018 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.priam.backupv2; + +import com.google.common.collect.Lists; +import com.google.inject.Inject; +import com.netflix.priam.backup.*; +import com.netflix.priam.config.IConfiguration; +import com.netflix.priam.utils.DateUtil; +import java.io.FileReader; +import java.nio.file.InvalidPathException; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.time.temporal.ChronoUnit; +import java.util.*; +import org.apache.commons.collections4.CollectionUtils; +import org.apache.commons.collections4.iterators.FilterIterator; +import org.apache.commons.io.FileUtils; +import org.json.simple.parser.JSONParser; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** Created by aagrawal on 12/18/18. */ +public class MetaV1Proxy implements IMetaProxy { + private static final Logger logger = LoggerFactory.getLogger(MetaV1Proxy.class); + private final IBackupFileSystem fs; + + @Inject + MetaV1Proxy(IConfiguration configuration, IFileSystemContext backupFileSystemCtx) { + fs = backupFileSystemCtx.getFileStrategy(configuration); + } + + @Override + public Path getLocalMetaFileDirectory() { + return null; + } + + @Override + public String getMetaPrefix(DateUtil.DateRange dateRange) { + return null; + } + + @Override + public List findMetaFiles(DateUtil.DateRange dateRange) { + Date startTime = new Date(dateRange.getStartTime().toEpochMilli()); + Date endTime = new Date(dateRange.getEndTime().toEpochMilli()); + String restorePrefix = fs.getPrefix().toString(); + logger.debug("Looking for snapshot meta file within restore prefix: {}", restorePrefix); + List metas = Lists.newArrayList(); + + Iterator backupfiles = fs.list(restorePrefix, startTime, endTime); + + while (backupfiles.hasNext()) { + AbstractBackupPath path = backupfiles.next(); + if (path.getType() == AbstractBackupPath.BackupFileType.META) + // Since there are now meta files for incrementals as well as snapshots, we need to + // find the correct one (i.e. the snapshot meta file (meta.json)) + if (path.getFileName().equalsIgnoreCase("meta.json")) { + metas.add(path); + } + } + + metas.sort(Collections.reverseOrder()); + + if (metas.size() == 0) { + logger.info( + "No meta v1 file found on remote file system for the time period: {}", + dateRange); + } + + return metas; + } + + @Override + public BackupVerificationResult isMetaFileValid(AbstractBackupPath metaBackupPath) { + BackupVerificationResult result = new BackupVerificationResult(); + result.remotePath = metaBackupPath.getRemotePath(); + result.snapshotInstant = metaBackupPath.getTime().toInstant(); + + try { + // Download the meta file. + Path metaFile = downloadMetaFile(metaBackupPath); + // Read the local meta file. + List metaFileList = getSSTFilesFromMeta(metaFile); + FileUtils.deleteQuietly(metaFile.toFile()); + result.manifestAvailable = true; + + // List the remote file system to validate the backup. + String prefix = fs.getPrefix().toString(); + Date strippedMsSnapshotTime = + new Date(result.snapshotInstant.truncatedTo(ChronoUnit.MINUTES).toEpochMilli()); + Iterator backupfiles = + fs.list(prefix, strippedMsSnapshotTime, strippedMsSnapshotTime); + + // Return a validation failure if the backup filesystem listing failed. + if (!backupfiles.hasNext()) { + logger.warn( + "ERROR: No files available while doing backup filesystem listing. Declaring the verification as failed."); + return result; + } + + // Convert the remote listing to Strings. 
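+ // Validation criterion used below (sketch): the backup is valid if and only if every path in
+ // the meta file also appears in the remote listing, i.e. filesInMetaOnly (metaFileList minus
+ // its intersection with remoteListing) ends up empty.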
+ List remoteListing = new ArrayList<>(); + while (backupfiles.hasNext()) { + AbstractBackupPath path = backupfiles.next(); + if (path.getType() == AbstractBackupPath.BackupFileType.SNAP) + remoteListing.add(path.getRemotePath()); + } + + if (metaFileList.isEmpty() && remoteListing.isEmpty()) { + logger.info( + "Uncommon scenario: both the meta file and the backup filesystem listing are empty. Considering this a success"); + result.valid = true; + return result; + } + + ArrayList filesMatched = + (ArrayList) CollectionUtils.intersection(metaFileList, remoteListing); + result.filesMatched = filesMatched.size(); + result.filesInMetaOnly = metaFileList; + result.filesInMetaOnly.removeAll(filesMatched); + + // There could be a scenario where the backup filesystem has more files than the meta file, + // e.g. some leftover objects + result.valid = (result.filesInMetaOnly.isEmpty()); + } catch (Exception e) { + logger.error("Error while processing meta file: " + metaBackupPath, e); + e.printStackTrace(); + } + + return result; + } + + @Override + public Path downloadMetaFile(AbstractBackupPath meta) throws BackupRestoreException { + fs.downloadFile(meta, ".download" /* suffix */, 10 /* retries */); + return Paths.get(meta.newRestoreFile().getAbsolutePath() + ".download"); + } + + @Override + public List getSSTFilesFromMeta(Path localMetaPath) throws Exception { + if (localMetaPath.toFile().isDirectory() || !localMetaPath.toFile().exists()) + throw new InvalidPathException( + localMetaPath.toString(), "Input path is either a directory or does not exist"); + + List result = new ArrayList<>(); + JSONParser jsonParser = new JSONParser(); + org.json.simple.JSONArray fileList = + (org.json.simple.JSONArray) + jsonParser.parse(new FileReader(localMetaPath.toFile())); + fileList.forEach(entry -> result.add(entry.toString())); + return result; + } + + @Override + public Iterator getIncrementals(DateUtil.DateRange dateRange) + throws BackupRestoreException { + String prefix = fs.getPrefix().toString(); + Iterator iterator = + fs.list( + prefix, + new Date(dateRange.getStartTime().toEpochMilli()), + new Date(dateRange.getEndTime().toEpochMilli())); + return new FilterIterator<>( + iterator, + abstractBackupPath -> + abstractBackupPath.getType() == AbstractBackupPath.BackupFileType.SST); + } + + @Override + public void cleanupOldMetaFiles() {} +} diff --git a/priam/src/main/java/com/netflix/priam/backupv2/MetaV2Proxy.java b/priam/src/main/java/com/netflix/priam/backupv2/MetaV2Proxy.java new file mode 100644 index 000000000..1cfa44c73 --- /dev/null +++ b/priam/src/main/java/com/netflix/priam/backupv2/MetaV2Proxy.java @@ -0,0 +1,247 @@ +/* + * Copyright 2018 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.priam.backupv2; + +import com.google.inject.Provider; +import com.netflix.priam.backup.*; +import com.netflix.priam.config.IConfiguration; +import com.netflix.priam.utils.DateUtil; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.*; +import javax.inject.Inject; +import org.apache.commons.collections4.iterators.FilterIterator; +import org.apache.commons.collections4.iterators.TransformIterator; +import org.apache.commons.io.FileUtils; +import org.apache.commons.io.filefilter.FileFilterUtils; +import org.apache.commons.io.filefilter.IOFileFilter; +import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** Do any management task for meta files. Created by aagrawal on 8/2/18. */ +public class MetaV2Proxy implements IMetaProxy { + private static final Logger logger = LoggerFactory.getLogger(MetaV2Proxy.class); + private final Path metaFileDirectory; + private final IBackupFileSystem fs; + private final Provider abstractBackupPathProvider; + + @Inject + MetaV2Proxy( + IConfiguration configuration, + IFileSystemContext backupFileSystemCtx, + Provider abstractBackupPathProvider) { + fs = backupFileSystemCtx.getFileStrategy(configuration); + this.abstractBackupPathProvider = abstractBackupPathProvider; + metaFileDirectory = Paths.get(configuration.getDataFileLocation()); + } + + @Override + public Path getLocalMetaFileDirectory() { + return metaFileDirectory; + } + + @Override + public String getMetaPrefix(DateUtil.DateRange dateRange) { + return getMatch(dateRange, AbstractBackupPath.BackupFileType.META_V2); + } + + private String getMatch( + DateUtil.DateRange dateRange, AbstractBackupPath.BackupFileType backupFileType) { + Path location = fs.getPrefix(); + AbstractBackupPath abstractBackupPath = abstractBackupPathProvider.get(); + String match = StringUtils.EMPTY; + if (dateRange != null) match = dateRange.match(); + if (dateRange != null && dateRange.getEndTime() == null) + match = dateRange.getStartTime().toEpochMilli() + ""; + return Paths.get( + abstractBackupPath.remoteV2Prefix(location, backupFileType).toString(), + match) + .toString(); + } + + @Override + public Iterator getIncrementals(DateUtil.DateRange dateRange) + throws BackupRestoreException { + String incrementalPrefix = getMatch(dateRange, AbstractBackupPath.BackupFileType.SST_V2); + String marker = + getMatch( + new DateUtil.DateRange(dateRange.getStartTime(), null), + AbstractBackupPath.BackupFileType.SST_V2); + logger.info( + "Listing filesystem with prefix: {}, marker: {}, daterange: {}", + incrementalPrefix, + marker, + dateRange); + Iterator iterator = fs.listFileSystem(incrementalPrefix, null, marker); + Iterator transformIterator = + new TransformIterator<>( + iterator, + s -> { + AbstractBackupPath path = abstractBackupPathProvider.get(); + path.parseRemote(s); + return path; + }); + + return new FilterIterator<>( + transformIterator, + abstractBackupPath -> + (abstractBackupPath.getLastModified().isAfter(dateRange.getStartTime()) + && abstractBackupPath + .getLastModified() + .isBefore(dateRange.getEndTime())) + || abstractBackupPath + .getLastModified() + .equals(dateRange.getStartTime()) + || abstractBackupPath + .getLastModified() + .equals(dateRange.getEndTime())); + } + + @Override + public List findMetaFiles(DateUtil.DateRange dateRange) { + ArrayList metas = new ArrayList<>(); + String prefix = 
getMetaPrefix(dateRange); + String marker = getMetaPrefix(new DateUtil.DateRange(dateRange.getStartTime(), null)); + logger.info( + "Listing filesystem with prefix: {}, marker: {}, daterange: {}", + prefix, + marker, + dateRange); + Iterator iterator = fs.listFileSystem(prefix, null, marker); + + while (iterator.hasNext()) { + AbstractBackupPath abstractBackupPath = abstractBackupPathProvider.get(); + abstractBackupPath.parseRemote(iterator.next()); + logger.debug("Meta file found: {}", abstractBackupPath); + if (abstractBackupPath.getLastModified().toEpochMilli() + >= dateRange.getStartTime().toEpochMilli() + && abstractBackupPath.getLastModified().toEpochMilli() + <= dateRange.getEndTime().toEpochMilli()) { + metas.add(abstractBackupPath); + } + } + + metas.sort(Collections.reverseOrder()); + + if (metas.size() == 0) { + logger.info( + "No meta file found on remote file system for the time period: {}", dateRange); + } + + return metas; + } + + @Override + public Path downloadMetaFile(AbstractBackupPath meta) throws BackupRestoreException { + fs.downloadFile(meta, "" /* suffix */, 10 /* retries */); + return Paths.get(meta.newRestoreFile().getAbsolutePath()); + } + + @Override + public void cleanupOldMetaFiles() { + logger.info("Deleting any old META_V2 files if any"); + IOFileFilter fileNameFilter = + FileFilterUtils.and( + FileFilterUtils.prefixFileFilter(MetaFileInfo.META_FILE_PREFIX), + FileFilterUtils.or( + FileFilterUtils.suffixFileFilter(MetaFileInfo.META_FILE_SUFFIX), + FileFilterUtils.suffixFileFilter( + MetaFileInfo.META_FILE_SUFFIX + ".tmp"))); + Collection files = + FileUtils.listFiles(metaFileDirectory.toFile(), fileNameFilter, null); + files.stream() + .filter(File::isFile) + .forEach( + file -> { + logger.debug( + "Deleting old META_V2 file found: {}", file.getAbsolutePath()); + file.delete(); + }); + } + + @Override + public List getSSTFilesFromMeta(Path localMetaPath) throws Exception { + MetaFileBackupWalker metaFileBackupWalker = new MetaFileBackupWalker(); + metaFileBackupWalker.readMeta(localMetaPath); + return metaFileBackupWalker.backupRemotePaths; + } + + @Override + public BackupVerificationResult isMetaFileValid(AbstractBackupPath metaBackupPath) { + MetaFileBackupValidator metaFileBackupValidator = new MetaFileBackupValidator(); + BackupVerificationResult result = metaFileBackupValidator.verificationResult; + result.remotePath = metaBackupPath.getRemotePath(); + result.snapshotInstant = metaBackupPath.getLastModified(); + + Path metaFile = null; + try { + metaFile = downloadMetaFile(metaBackupPath); + result.manifestAvailable = true; + + metaFileBackupValidator.readMeta(metaFile); + result.valid = (result.filesInMetaOnly.isEmpty()); + } catch (FileNotFoundException fne) { + logger.error(fne.getLocalizedMessage()); + } catch (IOException ioe) { + logger.error( + "IO Error while processing meta file: " + metaFile, ioe.getLocalizedMessage()); + ioe.printStackTrace(); + } catch (BackupRestoreException bre) { + logger.error("Error while trying to download the manifest file: {}", metaBackupPath); + } finally { + if (metaFile != null) FileUtils.deleteQuietly(metaFile.toFile()); + } + return result; + } + + private class MetaFileBackupValidator extends MetaFileReader { + private BackupVerificationResult verificationResult = new BackupVerificationResult(); + + @Override + public void process(ColumnFamilyResult columnfamilyResult) { + for (ColumnFamilyResult.SSTableResult ssTableResult : + columnfamilyResult.getSstables()) { + for (FileUploadResult fileUploadResult : 
ssTableResult.getSstableComponents()) { + if (fs.checkObjectExists(Paths.get(fileUploadResult.getBackupPath()))) { + verificationResult.filesMatched++; + } else { + verificationResult.filesInMetaOnly.add(fileUploadResult.getBackupPath()); + } + } + } + } + } + + private class MetaFileBackupWalker extends MetaFileReader { + private List backupRemotePaths = new ArrayList<>(); + + @Override + public void process(ColumnFamilyResult columnfamilyResult) { + for (ColumnFamilyResult.SSTableResult ssTableResult : + columnfamilyResult.getSstables()) { + for (FileUploadResult fileUploadResult : ssTableResult.getSstableComponents()) { + backupRemotePaths.add(fileUploadResult.getBackupPath()); + } + } + } + } +} diff --git a/priam/src/main/java/com/netflix/priam/backupv2/PrefixGenerator.java b/priam/src/main/java/com/netflix/priam/backupv2/PrefixGenerator.java deleted file mode 100644 index e88e7a05c..000000000 --- a/priam/src/main/java/com/netflix/priam/backupv2/PrefixGenerator.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright 2018 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -package com.netflix.priam.backupv2; - -import com.netflix.priam.config.IConfiguration; -import com.netflix.priam.identity.InstanceIdentity; -import com.netflix.priam.utils.DateUtil; - -import javax.inject.Inject; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.time.Instant; - -/** - * This is a utility class to get the backup location of the SSTables/Meta files with backup version 2.0. - * TODO: All this functinality will be used when we have BackupUploadDownloadService. - * Created by aagrawal on 6/5/18. - */ -public class PrefixGenerator { - - private IConfiguration configuration; - private InstanceIdentity instanceIdentity; - - @Inject - PrefixGenerator(IConfiguration configuration, InstanceIdentity instanceIdentity) { - this.configuration = configuration; - this.instanceIdentity = instanceIdentity; - } - - public Path getPrefix() { - return Paths.get(configuration.getBackupLocation(), configuration.getBackupPrefix(), getAppNameReverse(), instanceIdentity.getInstance().getToken()); - } - - public Path getSSTPrefix() { - return getPrefix(); - } - - public Path getSSTLocation(Instant instant, String keyspaceName, String columnfamilyName, String prefix, String fileName) { - return Paths.get(getPrefix().toString(), DateUtil.formatInstant(DateUtil.ddMMyyyyHHmm, instant), keyspaceName, columnfamilyName, prefix, fileName); - } - - public Path getMetaPrefix() { - return Paths.get(getPrefix().toString(), "META"); - } - - public Path getMetaLocation(Instant instant, String metaFileName) { - return Paths.get(getMetaPrefix().toString(), DateUtil.formatInstant(DateUtil.ddMMyyyyHHmm, instant), metaFileName); - } - - private String getAppNameReverse() { - return new StringBuilder(configuration.getAppName()).reverse().toString(); - } - - //e.g. mc-3-big-Data.db or sample_cf-ka-7213-Index.db - - /** - * Gives the prefix (common name) of the sstable components. 
Returns null if it is not sstable component - * e.g. mc-3-big-Data.db or ks-cf-ka-7213-Index.db will return mc-3-big or ks-cf-ka-7213 - * @param fileName name of the file for common prefix - * @return common prefix of the file, or null, if not identified as sstable component. - */ - public static final String getSSTFileBase(String fileName) { - String prefix = null; - try{ - prefix = fileName.substring(0, fileName.lastIndexOf("-")); - }catch (IndexOutOfBoundsException e) - { - //Do nothing - } - - return prefix; - } -} diff --git a/priam/src/main/java/com/netflix/priam/backupv2/SnapshotMetaTask.java b/priam/src/main/java/com/netflix/priam/backupv2/SnapshotMetaTask.java new file mode 100644 index 000000000..2192caf01 --- /dev/null +++ b/priam/src/main/java/com/netflix/priam/backupv2/SnapshotMetaTask.java @@ -0,0 +1,430 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +package com.netflix.priam.backupv2; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSetMultimap; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; +import com.netflix.priam.backup.*; +import com.netflix.priam.config.IBackupRestoreConfig; +import com.netflix.priam.config.IConfiguration; +import com.netflix.priam.connection.CassandraOperations; +import com.netflix.priam.health.CassandraMonitor; +import com.netflix.priam.identity.InstanceIdentity; +import com.netflix.priam.scheduler.CronTimer; +import com.netflix.priam.scheduler.TaskTimer; +import com.netflix.priam.utils.DateUtil; +import java.io.File; +import java.io.IOException; +import java.nio.file.DirectoryStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.text.ParseException; +import java.time.Clock; +import java.time.Duration; +import java.time.Instant; +import java.time.ZoneId; +import java.time.temporal.ChronoUnit; +import java.util.*; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; +import javax.inject.Inject; +import javax.inject.Named; +import javax.inject.Singleton; +import org.apache.commons.io.FileUtils; +import org.quartz.CronExpression; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * This service will run on CRON as specified by {@link + * IBackupRestoreConfig#getSnapshotMetaServiceCronExpression()} The intent of this service is to run + * a full snapshot on Cassandra, get the list of the SSTables on disk and then create a + * manifest.json file which will encapsulate the list of the files i.e. capture filesystem at a + * moment in time. 
This manifest.json file will ensure the true filesystem status is exposed (for + * external entities) and will be used in the future for Priam Backup Version 2, where a file is + * not uploaded to the backup file system unless the SSTable has been modified. This will lead to a + * huge reduction in storage costs and give bandwidth back to Cassandra instead of + * creating/uploading snapshots. + * Note that this component will "try" to enqueue the files to upload, but no guarantee is provided. + * If the enqueue fails for any reason, it is considered "OK" as there will be another service + * pushing all the files in the queue for upload (think of this as a cleanup thread; it will help + * us "resume" any backup that failed for any reason). Created by aagrawal on 6/18/18. + */ +@Singleton +public class SnapshotMetaTask extends AbstractBackup { + public static final String JOBNAME = "SnapshotMetaService"; + + private static final Logger logger = LoggerFactory.getLogger(SnapshotMetaTask.class); + private static final String SNAPSHOT_PREFIX = "snap_v2_"; + private static final String CASSANDRA_MANIFEST_FILE = "manifest.json"; + private static final String CASSANDRA_SCHEMA_FILE = "schema.cql"; + private static final TimeZone UTC = TimeZone.getTimeZone(ZoneId.of("UTC")); + private final BackupRestoreUtil backupRestoreUtil; + private final MetaFileWriterBuilder metaFileWriter; + private MetaFileWriterBuilder.DataStep dataStep; + private final IMetaProxy metaProxy; + private final CassandraOperations cassandraOperations; + private String snapshotName = null; + private static final Lock lock = new ReentrantLock(); + private final IBackupStatusMgr snapshotStatusMgr; + private final InstanceIdentity instanceIdentity; + private final ExecutorService threadPool; + private final IConfiguration config; + private final Clock clock; + private final IBackupRestoreConfig backupRestoreConfig; + private final BackupVerification backupVerification; + private final BackupHelper backupHelper; + + private enum MetaStep { + META_GENERATION, + UPLOAD_FILES + } + + private MetaStep metaStep = MetaStep.META_GENERATION; + + @Inject + SnapshotMetaTask( + IConfiguration config, + BackupHelper backupHelper, + MetaFileWriterBuilder metaFileWriter, + @Named("v2") IMetaProxy metaProxy, + InstanceIdentity instanceIdentity, + IBackupStatusMgr snapshotStatusMgr, + CassandraOperations cassandraOperations, + Clock clock, + IBackupRestoreConfig backupRestoreConfig, + BackupVerification backupVerification) { + super(config); + this.config = config; + this.backupHelper = backupHelper; + this.instanceIdentity = instanceIdentity; + this.snapshotStatusMgr = snapshotStatusMgr; + this.cassandraOperations = cassandraOperations; + this.clock = clock; + this.backupRestoreConfig = backupRestoreConfig; + this.backupVerification = backupVerification; + backupRestoreUtil = + new BackupRestoreUtil( + config.getSnapshotIncludeCFList(), config.getSnapshotExcludeCFList()); + this.metaFileWriter = metaFileWriter; + this.metaProxy = metaProxy; + this.threadPool = Executors.newSingleThreadExecutor(); + } + + /** + * Interval between generating the snapshot meta file using {@link SnapshotMetaTask}. + * + * @param config {@link IBackupRestoreConfig#getSnapshotMetaServiceCronExpression()} to get + * configuration details from priam. Use "-1" to disable the service. + * @return the timer to be used for the snapshot meta service. + * @throws IllegalArgumentException if the configuration is not set correctly or is not valid. + * This is to ensure we fail-fast. 
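+ * <p>For example (hypothetical value), a quartz cron expression of "0 0 1 * * ?" would
+ * schedule this task daily at 01:00, while "-1" disables the service.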
+     */
+    public static TaskTimer getTimer(IBackupRestoreConfig config) throws IllegalArgumentException {
+        return CronTimer.getCronTimer(JOBNAME, config.getSnapshotMetaServiceCronExpression());
+    }
+
+    static void cleanOldBackups(IConfiguration config) throws Exception {
+        // Clean up all the backup directories, if any.
+        Set<Path> backupPaths = AbstractBackup.getBackupDirectories(config, SNAPSHOT_FOLDER);
+        for (Path backupDirPath : backupPaths)
+            try (DirectoryStream<Path> directoryStream =
+                    Files.newDirectoryStream(backupDirPath, Files::isDirectory)) {
+                for (Path backupDir : directoryStream) {
+                    if (backupDir.toFile().getName().startsWith(SNAPSHOT_PREFIX)) {
+                        FileUtils.deleteDirectory(backupDir.toFile());
+                    }
+                }
+            }
+    }
+
+    public static boolean isBackupEnabled(IBackupRestoreConfig backupRestoreConfig)
+            throws Exception {
+        return (getTimer(backupRestoreConfig) != null);
+    }
+
+    String generateSnapshotName(Instant snapshotInstant) {
+        return SNAPSHOT_PREFIX + DateUtil.formatInstant(DateUtil.yyyyMMddHHmm, snapshotInstant);
+    }
+
+    /**
+     * Enqueue all the files in the snapshot directory for upload. This only enqueues the files
+     * and does not guarantee when they will be uploaded. It only tries to upload files that
+     * match the backup version 2.0 naming conventions.
+     */
+    public void uploadFiles() {
+        try {
+            // enqueue all the old snapshot folders for upload/delete, if any, as we don't want
+            // our disk to be filled by them.
+            metaStep = MetaStep.UPLOAD_FILES;
+            initiateBackup(SNAPSHOT_FOLDER, backupRestoreUtil);
+            logger.info("Finished queuing the files for upload");
+        } catch (Exception e) {
+            logger.error("Error while trying to upload all the files", e);
+            e.printStackTrace();
+        } finally {
+            metaStep = MetaStep.META_GENERATION;
+        }
+    }
+
+    @Override
+    public void execute() throws Exception {
+        if (!CassandraMonitor.hasCassadraStarted()) {
+            logger.debug("Cassandra has not started, hence SnapshotMetaService will not run");
+            return;
+        }
+
+        // Do not allow more than one SnapshotMetaService to run at the same time. This is
+        // possible because it is triggered via CRON.
+        if (!lock.tryLock()) {
+            logger.warn("SnapshotMetaService is already running! Try again later.");
+            throw new Exception("SnapshotMetaService already running");
+        }
+
+        // Save start snapshot status
+        Instant snapshotInstant = clock.instant();
+        String token = instanceIdentity.getInstance().getToken();
+        BackupMetadata backupMetadata =
+                new BackupMetadata(
+                        BackupVersion.SNAPSHOT_META_SERVICE,
+                        token,
+                        new Date(snapshotInstant.toEpochMilli()));
+        snapshotStatusMgr.start(backupMetadata);
+
+        try {
+            snapshotName = generateSnapshotName(snapshotInstant);
+            logger.info("Initializing SnapshotMetaService for taking a snapshot {}", snapshotName);
+
+            // Perform a cleanup of old snapshot meta_v2.json files, if any, as we don't want our
+            // disk to be filled by them. These files may be left over
+            // 1) when Priam shut down in the middle of this service, leaving a partial JSON, or
+            // 2) when there was no permission to upload to the backup file system.
+            metaProxy.cleanupOldMetaFiles();
+
+            // Take a new snapshot
+            cassandraOperations.takeSnapshot(snapshotName);
+            backupMetadata.setCassandraSnapshotSuccess(true);
+
+            // Process the snapshot and upload the meta file.
+            MetaFileWriterBuilder.UploadStep uploadStep = processSnapshot(snapshotInstant);
+            backupMetadata.setSnapshotLocation(
+                    config.getBackupPrefix() + File.separator + uploadStep.getRemoteMetaFilePath());
+            uploadStep.uploadMetaFile();
+
+            logger.info("Finished processing snapshot meta service");
+
+            // Upload all the files from the snapshot
+            uploadFiles();
+            snapshotStatusMgr.finish(backupMetadata);
+        } catch (Exception e) {
+            logger.error("Error while executing SnapshotMetaService", e);
+            snapshotStatusMgr.failed(backupMetadata);
+        } finally {
+            lock.unlock();
+        }
+    }
+
+    MetaFileWriterBuilder.UploadStep processSnapshot(Instant snapshotInstant) throws Exception {
+        dataStep = metaFileWriter.newBuilder().startMetaFileGeneration(snapshotInstant);
+        initiateBackup(SNAPSHOT_FOLDER, backupRestoreUtil);
+        return dataStep.endMetaFileGeneration();
+    }
+
+    private File getValidSnapshot(File snapshotDir, String snapshotName) {
+        File[] snapshotDirectories = snapshotDir.listFiles();
+        if (snapshotDirectories != null)
+            for (File fileName : snapshotDirectories)
+                if (fileName.exists()
+                        && fileName.isDirectory()
+                        && fileName.getName().matches(snapshotName)) return fileName;
+        return null;
+    }
+
+    @Override
+    public String getName() {
+        return JOBNAME;
+    }
+
+    private void uploadAllFiles(final File backupDir) throws Exception {
+        // Process all the snapshots with SNAPSHOT_PREFIX. This ensures that we "resume" uploads
+        // left over from a previous snapshot, whether Priam restarted or the upload failed for
+        // any other reason (e.g. we exhausted the wait time for the upload).
+        File[] snapshotDirectories = backupDir.listFiles();
+        if (snapshotDirectories != null) {
+            Instant target = getUploadTarget();
+            for (File snapshotDirectory : snapshotDirectories) {
+                // Skip anything that is not a SNAPSHOT_PREFIX directory.
+                if (!snapshotDirectory.getName().startsWith(SNAPSHOT_PREFIX)
+                        || !snapshotDirectory.isDirectory()) continue;
+
+                if (FileUtils.sizeOfDirectory(snapshotDirectory) == 0) {
+                    FileUtils.deleteQuietly(snapshotDirectory);
+                    continue;
+                }
+
+                // Process each SNAPSHOT_PREFIX snapshot. We do not wait for completion; we just
+                // add the files to the queue so that the next run happens on time.
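                // For reference, the directories handled here are the ones produced by
                // generateSnapshotName(), i.e. SNAPSHOT_PREFIX plus a UTC timestamp
                // (illustrative instant; assumes DateUtil.yyyyMMddHHmm is the "yyyyMMddHHmm"
                // pattern): generateSnapshotName(Instant.parse("2018-06-18T10:15:00Z"))
                // returns "snap_v2_201806181015".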
+                AbstractBackupPath.BackupFileType type = AbstractBackupPath.BackupFileType.SST_V2;
+                backupHelper.uploadAndDeleteAllFiles(snapshotDirectory, type, target, true);
+
+                // Next, upload secondary indexes
+                type = AbstractBackupPath.BackupFileType.SECONDARY_INDEX_V2;
+                ImmutableList<ListenableFuture<AbstractBackupPath>> futures;
+                for (File subDir : getSecondaryIndexDirectories(snapshotDirectory)) {
+                    futures = backupHelper.uploadAndDeleteAllFiles(subDir, type, target, true);
+                    if (futures.isEmpty()) {
+                        deleteIfEmpty(subDir);
+                    }
+                    Futures.whenAllComplete(futures).call(() -> deleteIfEmpty(subDir), threadPool);
+                }
+            }
+        }
+    }
+
+    private Instant getUploadTarget() {
+        Instant now = clock.instant();
+        Instant target =
+                now.plus(config.getTargetMinutesToCompleteSnaphotUpload(), ChronoUnit.MINUTES);
+        Duration verificationSLO =
+                Duration.ofHours(backupRestoreConfig.getBackupVerificationSLOInHours());
+        Instant verificationDeadline =
+                backupVerification
+                        .getLatestVerfifiedBackupTime()
+                        .map(backupTime -> backupTime.plus(verificationSLO))
+                        .orElse(Instant.MAX);
+        Instant nextSnapshotTime;
+        try {
+            CronExpression snapshotCron =
+                    new CronExpression(backupRestoreConfig.getSnapshotMetaServiceCronExpression());
+            snapshotCron.setTimeZone(UTC);
+            Date nextSnapshotDate = snapshotCron.getNextValidTimeAfter(Date.from(now));
+            nextSnapshotTime =
+                    nextSnapshotDate == null ? Instant.MAX : nextSnapshotDate.toInstant();
+        } catch (ParseException e) {
+            nextSnapshotTime = Instant.MAX;
+        }
+        return earliest(target, verificationDeadline, nextSnapshotTime);
+    }
+
+    private Instant earliest(Instant... instants) {
+        return Arrays.stream(instants).min(Instant::compareTo).get();
+    }
+
+    private Void deleteIfEmpty(File dir) {
+        if (FileUtils.sizeOfDirectory(dir) == 0) FileUtils.deleteQuietly(dir);
+        return null;
+    }
+
+    @Override
+    protected void processColumnFamily(File backupDir) throws Exception {
+        String keyspace = getKeyspace(backupDir);
+        String columnFamily = getColumnFamily(backupDir);
+        switch (metaStep) {
+            case META_GENERATION:
+                generateMetaFile(keyspace, columnFamily, backupDir)
+                        .ifPresent(this::deleteUploadedFiles);
+                break;
+            case UPLOAD_FILES:
+                uploadAllFiles(backupDir);
+                break;
+            default:
+                throw new Exception("Unknown meta file type: " + metaStep);
+        }
+    }
+
+    private Optional<ColumnFamilyResult> generateMetaFile(
+            final String keyspace, final String columnFamily, final File backupDir)
+            throws Exception {
+        File snapshotDir = getValidSnapshot(backupDir, snapshotName);
+        // Process this snapshot folder for the given columnFamily
+        if (snapshotDir == null) {
+            logger.warn("{} folder does not contain {} snapshots", backupDir, snapshotName);
+            return Optional.empty();
+        }
+
+        logger.debug("Scanning for all SSTables in: {}", snapshotDir.getAbsolutePath());
+        ImmutableSetMultimap.Builder<String, AbstractBackupPath> builder =
+                ImmutableSetMultimap.builder();
+        builder.putAll(getSSTables(snapshotDir, AbstractBackupPath.BackupFileType.SST_V2));
+
+        // Next, add secondary indexes
+        for (File directory : getSecondaryIndexDirectories(snapshotDir)) {
+            builder.putAll(
+                    getSSTables(directory, AbstractBackupPath.BackupFileType.SECONDARY_INDEX_V2));
+        }
+
+        ImmutableSetMultimap<String, AbstractBackupPath> sstables = builder.build();
+        logger.debug("Processing {} sstables from {}.{}", sstables.size(), keyspace, columnFamily);
+        ColumnFamilyResult result =
+                dataStep.addColumnfamilyResult(keyspace, columnFamily, sstables);
+        logger.debug("Finished processing KS: {}, CF: {}", keyspace, columnFamily);
+        return Optional.of(result);
+    }
+
+    private void deleteUploadedFiles(ColumnFamilyResult result) {
+
        result.getSstables()
+                .stream()
+                .flatMap(sstable -> sstable.getSstableComponents().stream())
+                .filter(file -> Boolean.TRUE.equals(file.getIsUploaded()))
+                .forEach(file -> FileUtils.deleteQuietly(file.getFileName().toFile()));
+    }
+
+    private ImmutableSetMultimap<String, AbstractBackupPath> getSSTables(
+            File snapshotDir, AbstractBackupPath.BackupFileType type) throws IOException {
+        ImmutableSetMultimap.Builder<String, AbstractBackupPath> ssTables =
+                ImmutableSetMultimap.builder();
+        backupHelper
+                .getBackupPaths(snapshotDir, type)
+                .forEach(bp -> getPrefix(bp.getBackupFile()).ifPresent(p -> ssTables.put(p, bp)));
+        return ssTables.build();
+    }
+
+    /**
+     * Gives the prefix (common name) of the sstable components. Returns an empty Optional if it is
+     * not an sstable component or a manifest or schema file.
+     *
+     *
For example: mc-3-big-Data.db -- mc-3-big ks-cf-ka-7213-Index.db -- ks-cf-ka-7213
+     *
+     * @param file the file from which to extract a common prefix.
+     * @return common prefix of the file, or empty if it is not identified as an sstable
+     *     component, manifest, or schema file.
+     */
+    private static Optional<String> getPrefix(File file) {
+        String fileName = file.getName();
+        String prefix = null;
+        if (fileName.contains("-")) {
+            prefix = fileName.substring(0, fileName.lastIndexOf("-"));
+        } else if (fileName.equalsIgnoreCase(CASSANDRA_MANIFEST_FILE)) {
+            prefix = "manifest";
+        } else if (fileName.equalsIgnoreCase(CASSANDRA_SCHEMA_FILE)) {
+            prefix = "schema";
+        } else {
+            logger.error("Unknown file type with no SSTFileBase found: {}", file.getAbsolutePath());
+        }
+        return Optional.ofNullable(prefix);
+    }
+
+    @VisibleForTesting
+    void setSnapshotName(String snapshotName) {
+        this.snapshotName = snapshotName;
+    }
+}
diff --git a/priam/src/main/java/com/netflix/priam/cli/Application.java b/priam/src/main/java/com/netflix/priam/cli/Application.java
index 24fea63c3..6eea39c20 100644
--- a/priam/src/main/java/com/netflix/priam/cli/Application.java
+++ b/priam/src/main/java/com/netflix/priam/cli/Application.java
@@ -18,21 +18,20 @@
 import com.google.inject.Guice;
 import com.google.inject.Injector;
-import com.netflix.priam.config.IConfiguration;
 import com.netflix.priam.backup.IBackupFileSystem;
+import com.netflix.priam.config.IConfiguration;
 public class Application {
-    static private Injector injector;
+    private static Injector injector;
     static Injector getInjector() {
-        if (injector == null)
-            injector = Guice.createInjector(new LightGuiceModule());
+        if (injector == null) injector = Guice.createInjector(new LightGuiceModule());
         return injector;
     }
     static void initialize() {
         IConfiguration conf = getInjector().getInstance(IConfiguration.class);
-        conf.intialize();
+        conf.initialize();
     }
     static void shutdownAdditionalThreads() {
diff --git a/priam/src/main/java/com/netflix/priam/cli/Backuper.java b/priam/src/main/java/com/netflix/priam/cli/Backuper.java
index 005d7dae6..1b4bb374c 100644
--- a/priam/src/main/java/com/netflix/priam/cli/Backuper.java
+++ b/priam/src/main/java/com/netflix/priam/cli/Backuper.java
@@ -36,4 +36,4 @@ public static void main(String[] args) {
         Application.shutdownAdditionalThreads();
     }
 }
-}
\ No newline at end of file
+}
diff --git a/priam/src/main/java/com/netflix/priam/cli/IncrementalBackuper.java b/priam/src/main/java/com/netflix/priam/cli/IncrementalBackuper.java
index 87f6574de..5075e4f58 100644
--- a/priam/src/main/java/com/netflix/priam/cli/IncrementalBackuper.java
+++ b/priam/src/main/java/com/netflix/priam/cli/IncrementalBackuper.java
@@ -26,7 +26,8 @@ public class IncrementalBackuper {
     public static void main(String[] args) {
         try {
             Application.initialize();
-            IncrementalBackup backuper = Application.getInjector().getInstance(IncrementalBackup.class);
+            IncrementalBackup backuper =
+                    Application.getInjector().getInstance(IncrementalBackup.class);
             try {
                 backuper.execute();
             } catch (Exception e) {
@@ -36,4 +37,4 @@ public static void main(String[] args) {
         Application.shutdownAdditionalThreads();
     }
 }
-}
\ No newline at end of file
+}
diff --git a/priam/src/main/java/com/netflix/priam/cli/LightGuiceModule.java b/priam/src/main/java/com/netflix/priam/cli/LightGuiceModule.java
index ff5fb0650..1f59b05b1 100644
--- a/priam/src/main/java/com/netflix/priam/cli/LightGuiceModule.java
+++ b/priam/src/main/java/com/netflix/priam/cli/LightGuiceModule.java
@@ -17,18 +17,16 @@
 package com.netflix.priam.cli;
 import com.google.inject.AbstractModule;
-import
com.netflix.priam.config.IConfiguration; import com.netflix.priam.aws.S3FileSystem; import com.netflix.priam.backup.IBackupFileSystem; -import com.netflix.priam.config.PriamConfiguration; +import com.netflix.priam.config.IConfiguration; import com.netflix.priam.identity.IMembership; class LightGuiceModule extends AbstractModule { @Override protected void configure() { - bind(IConfiguration.class).to(PriamConfiguration.class).asEagerSingleton(); + bind(IConfiguration.class).asEagerSingleton(); bind(IMembership.class).to(StaticMembership.class); bind(IBackupFileSystem.class).to(S3FileSystem.class); } } - diff --git a/priam/src/main/java/com/netflix/priam/cli/Restorer.java b/priam/src/main/java/com/netflix/priam/cli/Restorer.java index 8fed49ee3..742fb9166 100644 --- a/priam/src/main/java/com/netflix/priam/cli/Restorer.java +++ b/priam/src/main/java/com/netflix/priam/cli/Restorer.java @@ -16,13 +16,12 @@ */ package com.netflix.priam.cli; -import com.netflix.priam.backup.AbstractBackupPath; import com.netflix.priam.restore.Restore; +import com.netflix.priam.utils.DateUtil; +import java.time.Instant; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.Date; - public class Restorer { private static final Logger logger = LoggerFactory.getLogger(Restorer.class); @@ -33,19 +32,17 @@ static void displayHelp() { public static void main(String[] args) { try { Application.initialize(); - - Date startTime, endTime; + Instant startTime, endTime; if (args.length < 2) { displayHelp(); return; } - AbstractBackupPath path = Application.getInjector().getInstance(AbstractBackupPath.class); - startTime = path.parseDate(args[0]); - endTime = path.parseDate(args[1]); + startTime = DateUtil.parseInstant(args[0]); + endTime = DateUtil.parseInstant(args[1]); Restore restorer = Application.getInjector().getInstance(Restore.class); try { - restorer.restore(startTime, endTime); + restorer.restore(new DateUtil.DateRange(startTime, endTime)); } catch (Exception e) { logger.error("Unable to restore: ", e); } @@ -53,4 +50,4 @@ public static void main(String[] args) { Application.shutdownAdditionalThreads(); } } -} \ No newline at end of file +} diff --git a/priam/src/main/java/com/netflix/priam/cli/StaticMembership.java b/priam/src/main/java/com/netflix/priam/cli/StaticMembership.java index e6a739f2e..2f1626737 100644 --- a/priam/src/main/java/com/netflix/priam/cli/StaticMembership.java +++ b/priam/src/main/java/com/netflix/priam/cli/StaticMembership.java @@ -16,17 +16,15 @@ */ package com.netflix.priam.cli; +import com.google.common.collect.ImmutableSet; import com.netflix.priam.identity.IMembership; -import org.apache.cassandra.io.util.FileUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.io.FileInputStream; import java.io.IOException; -import java.util.Arrays; import java.util.Collection; -import java.util.List; import java.util.Properties; +import org.apache.cassandra.io.util.FileUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class StaticMembership implements IMembership { private static final String MEMBERSHIP_PRE = "membership."; @@ -37,7 +35,7 @@ public class StaticMembership implements IMembership { private static final Logger logger = LoggerFactory.getLogger(StaticMembership.class); - private List racMembership; + private ImmutableSet racMembership; private int racCount; public StaticMembership() throws IOException { @@ -57,26 +55,25 @@ public StaticMembership() throws IOException { for (String name : config.stringPropertyNames()) { if 
(name.startsWith(INSTANCES_PRE)) { racCount += 1; - if (name == INSTANCES_PRE + racName) - racMembership = Arrays.asList(config.getProperty(name).split(",")); + if (name.equals(INSTANCES_PRE + racName)) + racMembership = ImmutableSet.copyOf(config.getProperty(name).split(",")); } } } @Override - public List getRacMembership() { + public ImmutableSet getRacMembership() { return racMembership; } @Override - public List getCrossAccountRacMembership() { + public ImmutableSet getCrossAccountRacMembership() { return null; } @Override public int getRacMembershipSize() { - if (racMembership == null) - return 0; + if (racMembership == null) return 0; return racMembership.size(); } @@ -86,19 +83,16 @@ public int getRacCount() { } @Override - public void addACL(Collection listIPs, int from, int to) { - } + public void addACL(Collection listIPs, int from, int to) {} @Override - public void removeACL(Collection listIPs, int from, int to) { - } + public void removeACL(Collection listIPs, int from, int to) {} @Override - public List listACL(int from, int to) { + public ImmutableSet listACL(int from, int to) { return null; } @Override - public void expandRacMembership(int count) { - } -} \ No newline at end of file + public void expandRacMembership(int count) {} +} diff --git a/priam/src/main/java/com/netflix/priam/cluster/management/ClusterManagementService.java b/priam/src/main/java/com/netflix/priam/cluster/management/ClusterManagementService.java new file mode 100644 index 000000000..eaa75680a --- /dev/null +++ b/priam/src/main/java/com/netflix/priam/cluster/management/ClusterManagementService.java @@ -0,0 +1,48 @@ +package com.netflix.priam.cluster.management; +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +import com.netflix.priam.config.IConfiguration; +import com.netflix.priam.defaultimpl.IService; +import com.netflix.priam.scheduler.PriamScheduler; +import javax.inject.Inject; + +public class ClusterManagementService implements IService { + private final PriamScheduler scheduler; + private final IConfiguration config; + + @Inject + public ClusterManagementService(IConfiguration configuration, PriamScheduler priamScheduler) { + this.scheduler = priamScheduler; + this.config = configuration; + } + + @Override + public void scheduleService() throws Exception { + // Set up nodetool flush task + scheduleTask(scheduler, Flush.class, Flush.getTimer(config)); + + // Set up compaction task + scheduleTask(scheduler, Compaction.class, Compaction.getTimer(config)); + } + + @Override + public void updateServicePre() throws Exception {} + + @Override + public void updateServicePost() throws Exception {} +} diff --git a/priam/src/main/java/com/netflix/priam/cluster/management/Compaction.java b/priam/src/main/java/com/netflix/priam/cluster/management/Compaction.java index 271f7b546..f2ebb0bf3 100644 --- a/priam/src/main/java/com/netflix/priam/cluster/management/Compaction.java +++ b/priam/src/main/java/com/netflix/priam/cluster/management/Compaction.java @@ -1,129 +1,117 @@ /** * Copyright 2018 Netflix, Inc. - *
- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and + * + *
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + *
http://www.apache.org/licenses/LICENSE-2.0 + * + *
Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.priam.cluster.management; +import com.netflix.priam.backup.BackupRestoreUtil; import com.netflix.priam.config.IConfiguration; -import com.netflix.priam.defaultimpl.CassandraOperations; +import com.netflix.priam.connection.CassandraOperations; import com.netflix.priam.merics.CompactionMeasurement; import com.netflix.priam.scheduler.CronTimer; import com.netflix.priam.scheduler.TaskTimer; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import javax.inject.Inject; +import javax.inject.Singleton; import org.apache.commons.collections4.CollectionUtils; -import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import javax.inject.Inject; -import javax.inject.Singleton; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.regex.Pattern; -/** - * Utility class to compact the keyspaces/columnfamilies - * Created by aagrawal on 1/25/18. - */ +/** Utility class to compact the keyspaces/columnfamilies Created by aagrawal on 1/25/18. */ @Singleton public class Compaction extends IClusterManagement { private static final Logger logger = LoggerFactory.getLogger(Compaction.class); private final IConfiguration config; - private static final Pattern columnFamilyFilterPattern = Pattern.compile(".\\.."); private final CassandraOperations cassandraOperations; + @Inject - public Compaction(IConfiguration config, CassandraOperations cassandraOperations, CompactionMeasurement compactionMeasurement) { + public Compaction( + IConfiguration config, + CassandraOperations cassandraOperations, + CompactionMeasurement compactionMeasurement) { super(config, Task.COMPACTION, compactionMeasurement); this.config = config; this.cassandraOperations = cassandraOperations; } - private final Map> getCompactionFilter(String compactionFilter) throws IllegalArgumentException { - if (StringUtils.isEmpty(compactionFilter)) - return null; - final Map> columnFamilyFilter = new HashMap<>(); //key: keyspace, value: a list of CFs within the keyspace - String[] filters = compactionFilter.split(","); - for (int i = 0; i < filters.length; i++) { //process each filter - if (columnFamilyFilterPattern.matcher(filters[i]).find()) { - String[] filter = filters[i].split("\\."); - String keyspaceName = filter[0]; - String columnFamilyName = filter[1]; - if (columnFamilyName.indexOf("-") != -1) - columnFamilyName = columnFamilyName.substring(0, columnFamilyName.indexOf("-")); - List existingCfs = columnFamilyFilter.getOrDefault(keyspaceName, new ArrayList<>()); - if (!columnFamilyName.equalsIgnoreCase("*")) - existingCfs.add(columnFamilyName); - columnFamilyFilter.put(keyspaceName, existingCfs); - } else { - throw new IllegalArgumentException("Column family filter format is not valid. Format needs to be \"keyspace.columnfamily\". 
Invalid input: " + filters[i]);
-            }
-        }
-        return columnFamilyFilter;
-    }
-    final Map<String, List<String>> getCompactionIncludeFilter(IConfiguration config) throws Exception {
-        if (StringUtils.isEmpty(config.getCompactionIncludeCFList()))
-            return null;
-        Map<String, List<String>> columnFamilyFilter = getCompactionFilter(config.getCompactionIncludeCFList());
+
+    final Map<String, List<String>> getCompactionIncludeFilter(IConfiguration config)
+            throws Exception {
+        Map<String, List<String>> columnFamilyFilter =
+                BackupRestoreUtil.getFilter(config.getCompactionIncludeCFList());
         logger.info("Compaction: Override for include CF provided by user: {}", columnFamilyFilter);
         return columnFamilyFilter;
     }
-    final Map<String, List<String>> getCompactionExcludeFilter(IConfiguration config) throws Exception {
-        if (StringUtils.isEmpty(config.getCompactionExcludeCFList()))
-            return null;
-        Map<String, List<String>> columnFamilyFilter = getCompactionFilter(config.getCompactionExcludeCFList());
+
+    final Map<String, List<String>> getCompactionExcludeFilter(IConfiguration config)
+            throws Exception {
+        Map<String, List<String>> columnFamilyFilter =
+                BackupRestoreUtil.getFilter(config.getCompactionExcludeCFList());
         logger.info("Compaction: Override for exclude CF provided by user: {}", columnFamilyFilter);
         return columnFamilyFilter;
     }
+
     final Map<String, List<String>> getCompactionFilterCfs(IConfiguration config) throws Exception {
         final Map<String, List<String>> includeFilter = getCompactionIncludeFilter(config);
         final Map<String, List<String>> excludeFilter = getCompactionExcludeFilter(config);
         final Map<String, List<String>> allColumnfamilies = cassandraOperations.getColumnfamilies();
         Map<String, List<String>> result = new HashMap<>();
-        allColumnfamilies.entrySet().forEach(entry -> {
-            String keyspaceName = entry.getKey();
-            if (SchemaConstant.isSystemKeyspace(keyspaceName)) //no need to compact system keyspaces.
-                return;
-            List<String> columnfamilies = entry.getValue();
-            if (excludeFilter != null && excludeFilter.containsKey(keyspaceName)) {
-                List<String> excludeCFFilter = excludeFilter.get(keyspaceName);
-                //Is CF list null/empty? If yes, then exclude all CF's for this keyspace.
-                if (excludeCFFilter == null || excludeCFFilter.isEmpty())
-                    return;
-                columnfamilies = (List<String>) CollectionUtils.removeAll(columnfamilies, excludeCFFilter);
-            }
-            if (includeFilter != null) {
-                //Include filter is not empty and this keyspace is not provided in include filter. Ignore processing of this keyspace.
-                if (!includeFilter.containsKey(keyspaceName))
-                    return;
-                List<String> includeCFFilter = includeFilter.get(keyspaceName);
-                //If include filter is empty or null, it means include all.
-                //If not, then we need to find intersection of CF's which are present and one which are configured to compact.
-                if (includeCFFilter != null && !includeCFFilter.isEmpty()) //If include filter is empty or null, it means include all.
-                    columnfamilies = (List<String>) CollectionUtils.intersection(columnfamilies, includeCFFilter);
-            }
-            if (columnfamilies != null && !columnfamilies.isEmpty())
-                result.put(keyspaceName, columnfamilies);
-        });
+
+        allColumnfamilies.forEach(
+                (keyspaceName, columnfamilies) -> {
+                    if (SchemaConstant.isSystemKeyspace(
+                            keyspaceName)) // no need to compact system keyspaces.
+                    return;
+
+                    if (excludeFilter != null && excludeFilter.containsKey(keyspaceName)) {
+                        List<String> excludeCFFilter = excludeFilter.get(keyspaceName);
+                        // Is CF list null/empty? If yes, then exclude all CF's for this keyspace.
+                        if (excludeCFFilter == null || excludeCFFilter.isEmpty()) return;
+                        columnfamilies =
+                                (List<String>)
+                                        CollectionUtils.removeAll(columnfamilies, excludeCFFilter);
+                    }
+                    if (includeFilter != null) {
+                        // Include filter is not empty and this keyspace is not provided in include
+                        // filter. Ignore processing of this keyspace.
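                        // Worked example (hypothetical filters and keyspaces):
                        //   include = {ks1: [cf1]}, exclude = {ks2: []}
                        //   on disk: ks1 -> [cf1, cf2], ks2 -> [cf3], ks3 -> [cf4]
                        //   result:  ks1 -> [cf1]  (intersected with the include list)
                        //            ks2 dropped   (empty exclude list excludes every CF)
                        //            ks3 dropped   (absent from the non-null include filter)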
+ if (!includeFilter.containsKey(keyspaceName)) return; + List includeCFFilter = includeFilter.get(keyspaceName); + // If include filter is empty or null, it means include all. + // If not, then we need to find intersection of CF's which are present and + // one which are configured to compact. + if (includeCFFilter != null + && !includeCFFilter + .isEmpty()) // If include filter is empty or null, it means + // include all. + columnfamilies = + (List) + CollectionUtils.intersection( + columnfamilies, includeCFFilter); + } + if (columnfamilies != null && !columnfamilies.isEmpty()) + result.put(keyspaceName, columnfamilies); + }); return result; } /* - * @return the keyspace(s) compacted. List can be empty but never null. - */ + * @return the keyspace(s) compacted. List can be empty but never null. + */ protected String runTask() throws Exception { final Map> columnfamilies = getCompactionFilterCfs(config); if (!columnfamilies.isEmpty()) for (Map.Entry> entry : columnfamilies.entrySet()) { - cassandraOperations.forceKeyspaceCompaction(entry.getKey(), entry.getValue().toArray(new String[0])); + cassandraOperations.forceKeyspaceCompaction( + entry.getKey(), entry.getValue().toArray(new String[0])); } return columnfamilies.toString(); } @@ -131,9 +119,11 @@ protected String runTask() throws Exception { * Timer to be used for compaction interval. * * @param config {@link IConfiguration} to get configuration details from priam. - * @return the timer to be used for compaction interval from {@link IConfiguration#getCompactionCronExpression()} + * @return the timer to be used for compaction interval from {@link + * IConfiguration#getCompactionCronExpression()} + * @throws Exception If the cron expression is invalid. */ public static TaskTimer getTimer(IConfiguration config) throws Exception { return CronTimer.getCronTimer(Task.COMPACTION.name(), config.getCompactionCronExpression()); } -} \ No newline at end of file +} diff --git a/priam/src/main/java/com/netflix/priam/cluster/management/Flush.java b/priam/src/main/java/com/netflix/priam/cluster/management/Flush.java index 6d6d97b9c..e167eb8eb 100644 --- a/priam/src/main/java/com/netflix/priam/cluster/management/Flush.java +++ b/priam/src/main/java/com/netflix/priam/cluster/management/Flush.java @@ -1,50 +1,46 @@ /** * Copyright 2017 Netflix, Inc. - *
- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and + * + *
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + *
http://www.apache.org/licenses/LICENSE-2.0 + * + *
Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.priam.cluster.management; import com.netflix.priam.config.IConfiguration; -import com.netflix.priam.defaultimpl.CassandraOperations; +import com.netflix.priam.connection.CassandraOperations; import com.netflix.priam.merics.NodeToolFlushMeasurement; import com.netflix.priam.scheduler.CronTimer; import com.netflix.priam.scheduler.TaskTimer; -import com.netflix.priam.scheduler.UnsupportedTypeException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.inject.Inject; -import javax.inject.Singleton; import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.concurrent.ExecutionException; +import javax.inject.Inject; +import javax.inject.Singleton; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -/** - * Utility to flush Keyspaces from memtable to disk - * Created by vinhn on 10/12/16. - */ +/** Utility to flush Keyspaces from memtable to disk Created by vinhn on 10/12/16. */ @Singleton public class Flush extends IClusterManagement { private static final Logger logger = LoggerFactory.getLogger(Flush.class); private final IConfiguration config; private final CassandraOperations cassandraOperations; - private List keyspaces = new ArrayList(); + private List keyspaces = new ArrayList<>(); @Inject - public Flush(IConfiguration config, CassandraOperations cassandraOperations, NodeToolFlushMeasurement nodeToolFlushMeasurement) { + public Flush( + IConfiguration config, + CassandraOperations cassandraOperations, + NodeToolFlushMeasurement nodeToolFlushMeasurement) { super(config, Task.FLUSH, nodeToolFlushMeasurement); this.config = config; this.cassandraOperations = cassandraOperations; @@ -55,9 +51,9 @@ public Flush(IConfiguration config, CassandraOperations cassandraOperations, Nod * @return the keyspace(s) flushed. List can be empty but never null. */ protected String runTask() throws Exception { - List flushed = new ArrayList(); + List flushed = new ArrayList<>(); - //Get keyspaces to flush + // Get keyspaces to flush deriveKeyspaces(); if (this.keyspaces == null || this.keyspaces.isEmpty()) { @@ -65,14 +61,14 @@ protected String runTask() throws Exception { return flushed.toString(); } - //If flush is for certain keyspaces, validate keyspace exist + // If flush is for certain keyspaces, validate keyspace exist for (String keyspace : keyspaces) { if (!cassandraOperations.getKeyspaces().contains(keyspace)) { throw new IllegalArgumentException("Keyspace [" + keyspace + "] does not exist."); } - if (SchemaConstant.isSystemKeyspace(keyspace)) //no need to flush system keyspaces. - continue; + if (SchemaConstant.isSystemKeyspace(keyspace)) // no need to flush system keyspaces. + continue; try { cassandraOperations.forceKeyspaceFlush(keyspace); @@ -88,8 +84,8 @@ protected String runTask() throws Exception { /* Derive keyspace(s) to flush in the following order: explicit list provided by caller, property, or all keyspaces. 
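       For example (hypothetical property value): if getFlushKeyspaces() returns "ks1,ks2",
       only ks1 and ks2 are flushed; an empty value falls through to
       cassandraOperations.getKeyspaces(), and system keyspaces are still skipped in runTask().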
*/ - private void deriveKeyspaces() throws Exception{ - //== get value from property + private void deriveKeyspaces() throws Exception { + // == get value from property String raw = this.config.getFlushKeyspaces(); if (raw != null && !raw.isEmpty()) { String k[] = raw.split(","); @@ -100,7 +96,7 @@ private void deriveKeyspaces() throws Exception{ return; } - //== no override via FP, default to all keyspaces + // == no override via FP, default to all keyspaces this.keyspaces = cassandraOperations.getKeyspaces(); } @@ -108,42 +104,12 @@ private void deriveKeyspaces() throws Exception{ * Timer to be used for flush interval. * * @param config {@link IConfiguration} to get configuration details from priam. - * @return the timer to be used for flush interval. - *
- * If {@link IConfiguration#getFlushSchedulerType()} is {@link com.netflix.priam.scheduler.SchedulerType#HOUR} then it expects {@link IConfiguration#getFlushInterval()} in the format of hour=x or daily=x - *
- * If {@link IConfiguration#getFlushSchedulerType()} is {@link com.netflix.priam.scheduler.SchedulerType#CRON} then it expects a valid CRON expression from {@link IConfiguration#getFlushCronExpression()} + * @return the timer to be used for compaction interval from {@link + * IConfiguration#getFlushCronExpression()} + * @throws Exception If the cron expression is invalid. */ public static TaskTimer getTimer(IConfiguration config) throws Exception { - CronTimer cronTimer = null; - switch (config.getFlushSchedulerType()) { - case HOUR: - String timerVal = config.getFlushInterval(); //e.g. hour=0 or daily=10 - if (timerVal == null) - return null; - String s[] = timerVal.split("="); - if (s.length != 2) { - throw new IllegalArgumentException("Flush interval format is invalid. Expecting name=value, received: " + timerVal); - } - String name = s[0].toUpperCase(); - Integer time = new Integer(s[1]); - switch (name) { - case "HOUR": - cronTimer = new CronTimer(Task.FLUSH.name(), time, 0); //minute, sec after each hour - break; - case "DAILY": - cronTimer = new CronTimer(Task.FLUSH.name(), time, 0, 0); //hour, minute, sec to run on a daily basis - break; - default: - throw new UnsupportedTypeException("Flush interval type is invalid. Expecting \"hour, daily\", received: " + name); - } - - break; - case CRON: - cronTimer = CronTimer.getCronTimer(Task.FLUSH.name(), config.getFlushCronExpression()); - break; - } - return cronTimer; + return CronTimer.getCronTimer(Task.FLUSH.name(), config.getFlushCronExpression()); } -} \ No newline at end of file +} diff --git a/priam/src/main/java/com/netflix/priam/cluster/management/IClusterManagement.java b/priam/src/main/java/com/netflix/priam/cluster/management/IClusterManagement.java index 3ba49c454..8b80a1d1a 100644 --- a/priam/src/main/java/com/netflix/priam/cluster/management/IClusterManagement.java +++ b/priam/src/main/java/com/netflix/priam/cluster/management/IClusterManagement.java @@ -1,41 +1,39 @@ /** * Copyright 2017 Netflix, Inc. - *
- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and + * + *
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + *
http://www.apache.org/licenses/LICENSE-2.0 + * + *
Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.priam.cluster.management; import com.netflix.priam.config.IConfiguration; +import com.netflix.priam.health.CassandraMonitor; import com.netflix.priam.merics.IMeasurement; import com.netflix.priam.scheduler.Task; -import com.netflix.priam.utils.CassandraMonitor; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -/** - * Created by vinhn on 10/12/16. - */ +/** Created by vinhn on 10/12/16. */ public abstract class IClusterManagement extends Task { - public enum Task {FLUSH, COMPACTION} + public enum Task { + FLUSH, + COMPACTION + } private static final Logger logger = LoggerFactory.getLogger(IClusterManagement.class); - private Task taskType; - private IMeasurement measurement; - private static Lock lock = new ReentrantLock(); + private final Task taskType; + private final IMeasurement measurement; + private static final Lock lock = new ReentrantLock(); protected IClusterManagement(IConfiguration config, Task taskType, IMeasurement measurement) { super(config); @@ -58,11 +56,15 @@ public void execute() throws Exception { try { String result = runTask(); measurement.incrementSuccess(); - logger.info("Successfully finished executing the cluster management task: {} with result: {}", taskType, result); + logger.info( + "Successfully finished executing the cluster management task: {} with result: {}", + taskType, + result); if (result.isEmpty()) { - logger.warn("{} task completed successfully but no action was done.", taskType.name()); + logger.warn( + "{} task completed successfully but no action was done.", taskType.name()); } - } catch (Exception e){ + } catch (Exception e) { measurement.incrementFailure(); throw new Exception("Exception during execution of operation: " + taskType.name(), e); } finally { diff --git a/priam/src/main/java/com/netflix/priam/cluster/management/SchemaConstant.java b/priam/src/main/java/com/netflix/priam/cluster/management/SchemaConstant.java index 02b937d7c..48029ce9e 100644 --- a/priam/src/main/java/com/netflix/priam/cluster/management/SchemaConstant.java +++ b/priam/src/main/java/com/netflix/priam/cluster/management/SchemaConstant.java @@ -17,12 +17,9 @@ package com.netflix.priam.cluster.management; import com.google.common.collect.ImmutableSet; - import java.util.Set; -/** - * Created by aagrawal on 3/6/18. - */ +/** Created by aagrawal on 3/6/18. 
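 * <p>For example, isSystemKeyspace("System_Schema") returns true: the check lower-cases its
 * input before matching it against the known system keyspace names.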
*/ class SchemaConstant { private static final String SYSTEM_KEYSPACE_NAME = "system"; private static final String SCHEMA_KEYSPACE_NAME = "system_schema"; @@ -32,9 +29,15 @@ class SchemaConstant { private static final String DSE_SYSTEM = "dse_system"; private static final Set SYSTEM_KEYSPACE_NAMES = - ImmutableSet.of(SYSTEM_KEYSPACE_NAME, SCHEMA_KEYSPACE_NAME, TRACE_KEYSPACE_NAME, AUTH_KEYSPACE_NAME, DISTRIBUTED_KEYSPACE_NAME, DSE_SYSTEM); + ImmutableSet.of( + SYSTEM_KEYSPACE_NAME, + SCHEMA_KEYSPACE_NAME, + TRACE_KEYSPACE_NAME, + AUTH_KEYSPACE_NAME, + DISTRIBUTED_KEYSPACE_NAME, + DSE_SYSTEM); - public static final boolean isSystemKeyspace(String keyspace){ + public static final boolean isSystemKeyspace(String keyspace) { return SYSTEM_KEYSPACE_NAMES.contains(keyspace.toLowerCase()); } -} \ No newline at end of file +} diff --git a/priam/src/main/java/com/netflix/priam/compress/ChunkedStream.java b/priam/src/main/java/com/netflix/priam/compress/ChunkedStream.java index 4aff039c3..81f86c6ab 100644 --- a/priam/src/main/java/com/netflix/priam/compress/ChunkedStream.java +++ b/priam/src/main/java/com/netflix/priam/compress/ChunkedStream.java @@ -16,31 +16,34 @@ */ package com.netflix.priam.compress; -import org.apache.commons.io.IOUtils; -import org.xerial.snappy.SnappyOutputStream; - import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; import java.util.Iterator; +import org.apache.commons.io.IOUtils; +import org.xerial.snappy.SnappyOutputStream; -/** - * Byte iterator representing compressed data. - * Uses snappy compression - */ +/** Byte iterator representing compressed data. Uses snappy compression */ public class ChunkedStream implements Iterator { + private static final int BYTES_TO_READ = 2048; + private boolean hasnext = true; - private ByteArrayOutputStream bos; - private SnappyOutputStream compress; - private InputStream origin; - private long chunkSize; - private static int BYTES_TO_READ = 2048; + private final ByteArrayOutputStream bos; + private final SnappyOutputStream snappy; + private final InputStream origin; + private final long chunkSize; + private final CompressionType compression; + + public ChunkedStream(InputStream is, long chunkSize) { + this(is, chunkSize, CompressionType.NONE); + } - public ChunkedStream(InputStream is, long chunkSize) throws IOException { + public ChunkedStream(InputStream is, long chunkSize, CompressionType compression) { this.origin = is; this.bos = new ByteArrayOutputStream(); - this.compress = new SnappyOutputStream(bos); + this.snappy = new SnappyOutputStream(bos); this.chunkSize = chunkSize; + this.compression = compression; } @Override @@ -54,9 +57,17 @@ public byte[] next() { byte data[] = new byte[BYTES_TO_READ]; int count; while ((count = origin.read(data, 0, data.length)) != -1) { - compress.write(data, 0, count); - if (bos.size() >= chunkSize) - return returnSafe(); + switch (compression) { + case NONE: + bos.write(data, 0, count); + break; + case SNAPPY: + snappy.write(data, 0, count); + break; + default: + throw new IllegalArgumentException("Snappy compression only."); + } + if (bos.size() >= chunkSize) return returnSafe(); } // We don't have anything else to read hence set to false. 
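            // Consumer-side sketch (illustrative; upload() is a hypothetical callback):
            //   Iterator<byte[]> chunks =
            //           new ChunkedStream(in, 10L * 1024 * 1024, CompressionType.SNAPPY);
            //   while (chunks.hasNext()) upload(chunks.next());
            // Each next() returns roughly chunkSize bytes once the buffer fills; the final
            // chunk flushes and closes the underlying streams.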
return done(); @@ -66,10 +77,10 @@ public byte[] next() { } private byte[] done() throws IOException { - compress.flush(); + if (compression == CompressionType.SNAPPY) snappy.flush(); byte[] return_ = bos.toByteArray(); hasnext = false; - IOUtils.closeQuietly(compress); + IOUtils.closeQuietly(snappy); IOUtils.closeQuietly(bos); IOUtils.closeQuietly(origin); return return_; @@ -82,7 +93,5 @@ private byte[] returnSafe() throws IOException { } @Override - public void remove() { - } - + public void remove() {} } diff --git a/priam/src/main/java/com/netflix/priam/compress/CompressionType.java b/priam/src/main/java/com/netflix/priam/compress/CompressionType.java new file mode 100644 index 000000000..63d516b9e --- /dev/null +++ b/priam/src/main/java/com/netflix/priam/compress/CompressionType.java @@ -0,0 +1,7 @@ +package com.netflix.priam.compress; + +public enum CompressionType { + SNAPPY, + LZ4, + NONE +} diff --git a/priam/src/main/java/com/netflix/priam/compress/ICompression.java b/priam/src/main/java/com/netflix/priam/compress/ICompression.java index 48139ef8c..2a0267893 100644 --- a/priam/src/main/java/com/netflix/priam/compress/ICompression.java +++ b/priam/src/main/java/com/netflix/priam/compress/ICompression.java @@ -17,27 +17,16 @@ package com.netflix.priam.compress; import com.google.inject.ImplementedBy; - import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import java.util.Iterator; @ImplementedBy(SnappyCompression.class) public interface ICompression { - enum CompressionAlgorithm { - SNAPPY, LZ4, NONE - } - /** - * Uncompress the input stream and write to the output stream. - * Closes both input and output streams + * Uncompress the input stream and write to the output stream. Closes both input and output + * streams */ void decompressAndClose(InputStream input, OutputStream output) throws IOException; - - /** - * Produces chunks of compressed data. 
- */ - Iterator compress(InputStream is, long chunkSize) throws IOException; } diff --git a/priam/src/main/java/com/netflix/priam/compress/SnappyCompression.java b/priam/src/main/java/com/netflix/priam/compress/SnappyCompression.java index a510a966a..20d1d3f84 100644 --- a/priam/src/main/java/com/netflix/priam/compress/SnappyCompression.java +++ b/priam/src/main/java/com/netflix/priam/compress/SnappyCompression.java @@ -16,24 +16,14 @@ */ package com.netflix.priam.compress; +import java.io.*; import org.apache.commons.io.IOUtils; import org.xerial.snappy.SnappyInputStream; -import java.io.*; -import java.util.Iterator; - -/** - * Class to generate compressed chunks of data from an input stream using - * SnappyCompression - */ +/** Class to generate compressed chunks of data from an input stream using SnappyCompression */ public class SnappyCompression implements ICompression { private static final int BUFFER = 2 * 1024; - @Override - public Iterator compress(InputStream is, long chunkSize) throws IOException { - return new ChunkedStream(is, chunkSize); - } - @Override public void decompressAndClose(InputStream input, OutputStream output) throws IOException { try { @@ -45,17 +35,13 @@ public void decompressAndClose(InputStream input, OutputStream output) throws IO } private void decompress(InputStream input, OutputStream output) throws IOException { - SnappyInputStream is = new SnappyInputStream(new BufferedInputStream(input)); byte data[] = new byte[BUFFER]; - BufferedOutputStream dest1 = new BufferedOutputStream(output, BUFFER); - try { + try (BufferedOutputStream dest1 = new BufferedOutputStream(output, BUFFER); + SnappyInputStream is = new SnappyInputStream(new BufferedInputStream(input))) { int c; while ((c = is.read(data, 0, BUFFER)) != -1) { dest1.write(data, 0, c); } - } finally { - IOUtils.closeQuietly(dest1); - IOUtils.closeQuietly(is); } } } diff --git a/priam/src/main/java/com/netflix/priam/config/BackupRestoreConfig.java b/priam/src/main/java/com/netflix/priam/config/BackupRestoreConfig.java index 7046a0a2f..c1d320029 100644 --- a/priam/src/main/java/com/netflix/priam/config/BackupRestoreConfig.java +++ b/priam/src/main/java/com/netflix/priam/config/BackupRestoreConfig.java @@ -1,29 +1,26 @@ /** * Copyright 2018 Netflix, Inc. - *
- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and + * + *
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + *
http://www.apache.org/licenses/LICENSE-2.0 + * + *
Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.priam.config; +import com.google.common.base.Splitter; +import com.google.common.collect.ImmutableSet; import com.netflix.priam.configSource.IConfigSource; - import javax.inject.Inject; +import org.apache.commons.lang3.StringUtils; -/** - * Implementation of IBackupRestoreConfig. - * Created by aagrawal on 6/26/18. - */ -public class BackupRestoreConfig implements IBackupRestoreConfig{ +/** Implementation of IBackupRestoreConfig. Created by aagrawal on 6/26/18. */ +public class BackupRestoreConfig implements IBackupRestoreConfig { private final IConfigSource config; @@ -32,7 +29,44 @@ public BackupRestoreConfig(IConfigSource config) { this.config = config; } - public String getSnapshotMetaServiceCronExpression(){ + @Override + public String getSnapshotMetaServiceCronExpression() { return config.get("priam.snapshot.meta.cron", "-1"); } + + @Override + public boolean enableV2Backups() { + return config.get("priam.enableV2Backups", false); + } + + @Override + public boolean enableV2Restore() { + return config.get("priam.enableV2Restore", false); + } + + @Override + public int getBackupTTLMonitorPeriodInSec() { + return config.get("priam.backupTTLMonitorPeriodInSec", 21600); + } + + @Override + public int getBackupVerificationSLOInHours() { + return config.get("priam.backupVerificationSLOInHours", 24); + } + + @Override + public String getBackupVerificationCronExpression() { + return config.get("priam.backupVerificationCronExpression", "0 30 0/1 1/1 * ? *"); + } + + @Override + public String getBackupNotifyComponentIncludeList() { + return config.get("priam.backupNotifyComponentIncludeList", StringUtils.EMPTY); + } + + @Override + public ImmutableSet getBackupNotificationAdditionalMessageAttrs() { + String value = config.get("priam.backupNotifyAdditionalMessageAttrs", StringUtils.EMPTY); + return ImmutableSet.copyOf(Splitter.on(",").omitEmptyStrings().trimResults().split(value)); + } } diff --git a/priam/src/main/java/com/netflix/priam/config/BackupsToCompress.java b/priam/src/main/java/com/netflix/priam/config/BackupsToCompress.java new file mode 100644 index 000000000..9a7591cc1 --- /dev/null +++ b/priam/src/main/java/com/netflix/priam/config/BackupsToCompress.java @@ -0,0 +1,7 @@ +package com.netflix.priam.config; + +public enum BackupsToCompress { + ALL, + IF_REQUIRED, + NONE +} diff --git a/priam/src/main/java/com/netflix/priam/config/IBackupRestoreConfig.java b/priam/src/main/java/com/netflix/priam/config/IBackupRestoreConfig.java index c7d2c4d3d..5ded92836 100644 --- a/priam/src/main/java/com/netflix/priam/config/IBackupRestoreConfig.java +++ b/priam/src/main/java/com/netflix/priam/config/IBackupRestoreConfig.java @@ -1,25 +1,25 @@ /** * Copyright 2018 Netflix, Inc. - *
- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and + * + *
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + *
http://www.apache.org/licenses/LICENSE-2.0 + * + *
Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.priam.config; +import com.google.common.collect.ImmutableSet; import com.google.inject.ImplementedBy; +import org.apache.commons.lang3.StringUtils; /** - * This interface is to abstract out the backup and restore configuration used by Priam. Goal is to eventually have each module/functionality to have its own Config. - * Created by aagrawal on 6/26/18. + * This interface is to abstract out the backup and restore configuration used by Priam. Goal is to + * eventually have each module/functionality to have its own Config. Created by aagrawal on 6/26/18. */ @ImplementedBy(BackupRestoreConfig.class) public interface IBackupRestoreConfig { @@ -28,10 +28,100 @@ public interface IBackupRestoreConfig { * Cron expression to be used for snapshot meta service. Use "-1" to disable the service. * * @return Snapshot Meta Service cron expression for generating manifest.json - * @see quartz-scheduler - * @see http://www.cronmaker.com To build new cron timer + * @see quartz-scheduler + * @see http://www.cronmaker.com */ - default String getSnapshotMetaServiceCronExpression(){ - return "-1"; - } + default String getSnapshotMetaServiceCronExpression() { + return "-1"; + } + + /** + * Enable the backup version 2.0 in new format. This will start uploads of "incremental" backups + * in new format. This is to be used for migration from backup version 1.0. + * + * @return boolean value indicating if backups in version 2.0 should be started. + */ + default boolean enableV2Backups() { + return false; + } + + /** + * Monitoring period for the service which does TTL of the backups. This service will run only + * if v2 backups are enabled. The idea is to run this service at least once a day to ensure we + * are marking backup files for TTL as configured via {@link + * IConfiguration#getBackupRetentionDays()}. Use -1 to disable this service. + * + *
NOTE: This should be scheduled on interval rather than CRON as this results in entire + * fleet to start deletion of files at the same time and remote file system may get overwhelmed. + * + * @return Backup TTL Service execution duration for trying to delete backups. Note that this + * denotes duration of the job trying to delete backups and is not the TTL of the backups. + * @see quartz-scheduler + * @see http://www.cronmaker.com + */ + default int getBackupTTLMonitorPeriodInSec() { + return 21600; + } + + /** + * Cron expression to be used for the service which does verification of the backups. This + * service will run only if v2 backups are enabled. + * + * @return Backup Verification Service cron expression for trying to verify backups. Default: + * run every hour at 30 minutes. + * @see quartz-scheduler + * @see http://www.cronmaker.com + */ + default String getBackupVerificationCronExpression() { + return "0 30 0/1 1/1 * ? *"; + } + + /** + * The default backup SLO for any cluster. This will ensure that we upload and validate a backup + * in that SLO window. If no valid backup is found, we log ERROR message. This service will run + * only if v2 backups are enabled. + * + * @return the backup SLO in hours. Default: 24 hours. + */ + default int getBackupVerificationSLOInHours() { + return 24; + } + + /** + * If restore is enabled and if this flag is enabled, we will try to restore using Backup V2.0. + * + * @return if restore should be using backup version 2.0. If this is false we will use backup + * version 1.0. + */ + default boolean enableV2Restore() { + return false; + } + + /** + * Returns a csv of backup component file types {@link + * com.netflix.priam.backup.AbstractBackupPath.BackupFileType} on which to send backup + * notifications. Default value of this filter is an empty string which would imply that backup + * notifications will be sent for all component types see {@link + * com.netflix.priam.backup.AbstractBackupPath.BackupFileType}. Sample filter : + * "SNAPSHOT_VERIFIED, META_V2" + * + * @return A csv string that can be parsed to infer the component file types on which to send + * backup related notifications + */ + default String getBackupNotifyComponentIncludeList() { + return StringUtils.EMPTY; + } + + /** + * Returns a set of attribute names to add to MessageAttributes in the backup notifications. SNS + * filter policy needs keys in MessageAttributes in order to filter based on those keys. + * + * @return A set of attributes to include in MessageAttributes. 
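     *
     * <p>For example, with a hypothetical property value
     * "priam.backupNotifyAdditionalMessageAttrs=s3bucketname,s3objectkey", the
     * BackupRestoreConfig implementation yields the set ["s3bucketname", "s3objectkey"]
     * (it splits on "," and trims whitespace); this interface default returns an empty set.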
+ */ + default ImmutableSet getBackupNotificationAdditionalMessageAttrs() { + return ImmutableSet.of(); + } } diff --git a/priam/src/main/java/com/netflix/priam/config/IConfiguration.java b/priam/src/main/java/com/netflix/priam/config/IConfiguration.java index 829ebabda..db46690e2 100644 --- a/priam/src/main/java/com/netflix/priam/config/IConfiguration.java +++ b/priam/src/main/java/com/netflix/priam/config/IConfiguration.java @@ -16,780 +16,951 @@ */ package com.netflix.priam.config; +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.collect.ImmutableSet; import com.google.inject.ImplementedBy; -import com.netflix.priam.tuner.JVMOption; -import com.netflix.priam.config.PriamConfiguration; -import com.netflix.priam.tuner.GCType; -import com.netflix.priam.identity.config.InstanceDataRetriever; -import com.netflix.priam.scheduler.SchedulerType; import com.netflix.priam.scheduler.UnsupportedTypeException; - +import com.netflix.priam.tuner.GCType; +import java.io.File; +import java.util.Collections; import java.util.List; import java.util.Map; +import org.apache.commons.lang3.StringUtils; -/** - * Interface for Priam's configuration - */ +/** Interface for Priam's configuration */ @ImplementedBy(PriamConfiguration.class) public interface IConfiguration { - void intialize(); + void initialize(); - /** - * @return Path to the home dir of Cassandra - */ - String getCassHome(); + /** @return Path to the home dir of Cassandra */ + default String getCassHome() { + return "/etc/cassandra"; + } - String getYamlLocation(); + /** @return Location to `cassandra.yaml`. */ + default String getYamlLocation() { + return getCassHome() + "/conf/cassandra.yaml"; + } - /** - * @return Path to jvm.options file. This is used to pass JVM options to Cassandra. - */ - String getJVMOptionsFileLocation(); + /** @return Path to jvm.options file. This is used to pass JVM options to Cassandra. */ + default String getJVMOptionsFileLocation() { + return getCassHome() + "/conf/jvm-server.options"; + } /** - * @return Type of garbage collection mechanism to use for Cassandra. Supported values are CMS,G1GC + * @return Type of garbage collection mechanism to use for Cassandra. Supported values are + * CMS, G1GC */ - GCType getGCType() throws UnsupportedTypeException; + default GCType getGCType() throws UnsupportedTypeException { + return GCType.CMS; + } - /** - * @return Set of JVM options to exclude/comment. - */ - Map getJVMExcludeSet(); + /** @return Set of JVM options to exclude/comment. */ + default String getJVMExcludeSet() { + return StringUtils.EMPTY; + } - /** - * @return Set of JMV options to add/upsert - */ - Map getJVMUpsertSet(); + /** @return Set of JVM options to add/upsert */ + default String getJVMUpsertSet() { + return StringUtils.EMPTY; + } - /** - * @return Path to Cassandra startup script - */ - String getCassStartupScript(); + /** @return Path to Cassandra startup script */ + default String getCassStartupScript() { + return "/etc/init.d/cassandra start"; + } - /** - * @return Path to Cassandra stop sript - */ - String getCassStopScript(); + /** @return Path to Cassandra stop script */ + default String getCassStopScript() { + return "/etc/init.d/cassandra stop"; + } /** - * @return int representing how many seconds Priam should fail healthchecks for before gracefully draining (nodetool drain) - * cassandra prior to stop.
If this number is negative then no draining occurs and Priam immediately stops Cassanddra - using the provided stop script. If this number is >= 0 then Priam will fail healthchecks for this number of - seconds before gracefully draining cassandra (nodetool drain) and stopping cassandra with the stop script. + * @return int representing how many seconds Priam should fail healthchecks for before + * gracefully draining (nodetool drain) cassandra prior to stop. If this number is negative + * then no draining occurs and Priam immediately stops Cassandra using the provided stop + * script. If this number is >= 0 then Priam will fail healthchecks for this number of + * seconds before gracefully draining cassandra (nodetool drain) and stopping cassandra with + * the stop script. */ - int getGracefulDrainHealthWaitSeconds(); + default int getGracefulDrainHealthWaitSeconds() { + return -1; + } /** - * @return int representing how often (in seconds) Priam should auto-remediate Cassandra process crash - * If zero, Priam will restart Cassandra whenever it notices it is crashed - * If a positive number, Priam will restart cassandra no more than once in that number of seconds. For example a - * value of 60 means that Priam will only restart Cassandra once per 60 seconds - * If a negative number, Priam will not restart Cassandra due to crash at all + * @return int representing how often (in seconds) Priam should auto-remediate a Cassandra process + * crash. If zero, Priam will restart Cassandra whenever it notices it is crashed. If a + * positive number, Priam will restart cassandra no more than once in that number of + * seconds. For example, a value of 60 means that Priam will only restart Cassandra once per + * 60 seconds. If a negative number, Priam will not restart Cassandra due to a crash at all. + */ - int getRemediateDeadCassandraRate(); + default int getRemediateDeadCassandraRate() { + return 3600; + } /** * Eg: 'my_backup' will result in all files stored under this dir/prefix * * @return Prefix that will be added to remote backup location */ - String getBackupLocation(); + default String getBackupLocation() { + return "backup"; + } - /** - * @return Get Backup retention in days - */ - int getBackupRetentionDays(); + /** @return Get Backup retention in days */ + default int getBackupRetentionDays() { + return 0; + } - /** - * @return Get list of racs to backup. Backup all racs if empty - */ - List getBackupRacs(); + /** @return Get list of racs to backup. Backup all racs if empty */ + default List getBackupRacs() { + return Collections.EMPTY_LIST; + } /** - * Bucket name in case of AWS + * Backup location, i.e. the remote file system to upload backups to, e.g. for S3 it will be the S3 bucket + * name * * @return Bucket name used for backups */ - String getBackupPrefix(); - - /** - * Location containing backup files. Typically bucket name followed by path - * to the clusters backup - */ - String getRestorePrefix(); + default String getBackupPrefix() { + return "cassandra-archive"; + } /** - * @param prefix Set the current restore prefix + * @return Location containing backup files. Typically the bucket name followed by the path to the + * cluster's backup */ - void setRestorePrefix(String prefix); + default String getRestorePrefix() { + return StringUtils.EMPTY; + } /** - * @return List of keyspaces to restore. If none, all keyspaces are - * restored. + * This is the location of the data/logs/hints for the cassandra. Priam will, by default, create + * all the sub-directories required.
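The crash-remediation contract above reduces to a simple rate check; the helper below is an illustrative reading of it, not Priam's actual scheduler code:

// Illustrative interpretation of getRemediateDeadCassandraRate() (sketch only).
static boolean mayAutoRestart(int rateSeconds, long lastRestartMillis, long nowMillis) {
    if (rateSeconds < 0) return false; // auto-remediation disabled
    if (rateSeconds == 0) return true; // restart on every detected crash
    return nowMillis - lastRestartMillis >= rateSeconds * 1000L; // at most once per window
}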
This dir should have permission to be altered by both + * cassandra and Priam. If this is configured correctly, there is no need to configure {@link + * #getDataFileLocation()}, {@link #getLogDirLocation()}, {@link #getCacheLocation()} and {@link + * #getCommitLogLocation()}. Alternatively, all the other directories should be set explicitly by the + * user. Set this location to a drive with fast read/write performance and sizable disk space. + * + * @return Location where all the data/logs/hints for the cassandra will sit. */ - List getRestoreKeySpaces(); + default String getCassandraBaseDirectory() { + return "/var/lib/cassandra"; + } - /** - * @return Location of the local data dir - */ - String getDataFileLocation(); + /** @return Location of the local data dir */ + default String getDataFileLocation() { + return getCassandraBaseDirectory() + "/data"; + } /** - * Path where cassandra logs should be stored. This is passed to Cassandra as where to store logs. + * Path where cassandra logs should be stored. This is passed to Cassandra as where to store + * logs. + * * @return Path to cassandra logs. */ - String getLogDirLocation(); - - /** - * @return Location of the hints data directory - */ - String getHintsLocation(); - /** - * @return Location of local cache - */ - String getCacheLocation(); - - /** - * @return Location of local commit log dir - */ - String getCommitLogLocation(); + default String getLogDirLocation() { + return getCassandraBaseDirectory() + "/logs"; + } - /** - * @return Remote commit log location for backups - */ - String getBackupCommitLogLocation(); + /** @return Location of the hints data directory */ + default String getHintsLocation() { + return getCassandraBaseDirectory() + "/hints"; + } - /** - * @return Preferred data part size for multi part uploads - */ - long getBackupChunkSize(); + /** @return Location of local cache */ + default String getCacheLocation() { + return getCassandraBaseDirectory() + "/saved_caches"; + } - /** - * @return Cassandra's JMX port - */ - int getJmxPort(); + /** @return Location of local commit log dir */ + default String getCommitLogLocation() { + return getCassandraBaseDirectory() + "/commitlog"; + } - /** - * @return Cassandra's JMX username - */ - String getJmxUsername(); + /** @return Remote commit log location for backups */ + default String getBackupCommitLogLocation() { + return StringUtils.EMPTY; + } - /** - * @return Cassandra's JMX password - */ - String getJmxPassword(); + /** @return Preferred data part size for multi part uploads */ + default long getBackupChunkSize() { + return 10 * 1024 * 1024L; + } - /** - * @return Enables Remote JMX connections n C* - */ - boolean enableRemoteJMX(); + /** @return Cassandra's JMX port */ + default int getJmxPort() { + return 7199; + } + /** @return Cassandra's JMX username */ + default String getJmxUsername() { + return null; + } - /** - * Cassandra storage/cluster communication port - */ - int getStoragePort(); + /** @return Cassandra's JMX password */ + default String getJmxPassword() { + return null; + } - int getSSLStoragePort(); + /** @return Enables Remote JMX connections on C* */ + default boolean enableRemoteJMX() { + return false; + } - /** - * @return Cassandra's thrift port - */ - int getThriftPort(); + /** @return Cassandra storage/cluster communication port */ + default int getStoragePort() { + return 7000; + } - /** - * @return Port for CQL binary transport.
- */ - int getNativeTransportPort(); + default int getSSLStoragePort() { + return 7001; + } - /** - * @return Snitch to be used in cassandra.yaml - */ - String getSnitch(); + /** @return Port for CQL binary transport. */ + default int getNativeTransportPort() { + return 9042; + } - /** - * @return Cluster name - */ - String getAppName(); + /** @return Snitch to be used in cassandra.yaml */ + default String getSnitch() { + return "org.apache.cassandra.locator.Ec2Snitch"; + } - /** - * @return RAC (or zone for AWS) - */ - String getRac(); + /** @return Cluster name */ + default String getAppName() { + return "cass_cluster"; + } - /** - * @return List of all RAC used for the cluster - */ + /** @return List of all RAC used for the cluster */ List getRacs(); - /** - * @return Local hostmame - */ - String getHostname(); - - /** - * @return Get instance name (for AWS) - */ - String getInstanceName(); - - /** - * @return Max heap size be used for Cassandra - */ - String getHeapSize(); + /** @return Max heap size be used for Cassandra */ + default String getHeapSize() { + return "8G"; + } - /** - * @return New heap size for Cassandra - */ - String getHeapNewSize(); + /** @return New heap size for Cassandra */ + default String getHeapNewSize() { + return "2G"; + } /** - * Cron expression to be used to schedule regular compactions. Use "-1" to disable the CRON. Default: -1 + * Cron expression to be used to schedule regular compactions. Use "-1" to disable the CRON. + * Default: -1 * * @return Compaction cron expression. - * @see quartz-scheduler + * @see quartz-scheduler * @see http://www.cronmaker.com To build new cron timer */ - default String getCompactionCronExpression(){ + default String getCompactionCronExpression() { return "-1"; } - /** - * Column Family(ies), comma delimited, to start compactions (user-initiated or on CRON). - * Note 1: The expected format is keyspace.cfname. If no value is provided then compaction is scheduled for all KS,CF(s) - * Note 2: CF name allows special character "*" to denote all the columnfamilies in a given keyspace. e.g. keyspace1.* denotes all the CFs in keyspace1. - * Note 3: {@link #getCompactionExcludeCFList()} is applied first to exclude CF/keyspace and then {@link #getCompactionIncludeCFList()} is applied to include the CF's/keyspaces. - * @return Column Family(ies), comma delimited, to start compactions. If no filter is applied, returns null. - */ - default String getCompactionIncludeCFList(){ - return null; - } - - /** - * Column family(ies), comma delimited, to exclude while starting compaction (user-initiated or on CRON). - * Note 1: The expected format is keyspace.cfname. If no value is provided then compaction is scheduled for all KS,CF(s) - * Note 2: CF name allows special character "*" to denote all the columnfamilies in a given keyspace. e.g. keyspace1.* denotes all the CFs in keyspace1. - * Note 3: {@link #getCompactionExcludeCFList()} is applied first to exclude CF/keyspace and then {@link #getCompactionIncludeCFList()} is applied to include the CF's/keyspaces. - * @return Column Family(ies), comma delimited, to exclude from compactions. If no filter is applied, returns null. + * Column Family(ies), comma delimited, to start compactions (user-initiated or on CRON). Note + * 1: The expected format is keyspace.cfname. If no value is provided then compaction is + * scheduled for all KS,CF(s) Note 2: CF name allows special character "*" to denote all the + * columnfamilies in a given keyspace. e.g. keyspace1.* denotes all the CFs in keyspace1. 
Note + * 3: {@link #getCompactionExcludeCFList()} is applied first to exclude CF/keyspace and then + * {@link #getCompactionIncludeCFList()} is applied to include the CF's/keyspaces. + * + * @return Column Family(ies), comma delimited, to start compactions. If no filter is applied, + * returns null. */ - default String getCompactionExcludeCFList(){ + default String getCompactionIncludeCFList() { return null; } /** - * @return Backup hour for snapshot backups (0 - 23) - * @deprecated Use the {{@link #getBackupCronExpression()}} instead. + * Column family(ies), comma delimited, to exclude while starting compaction (user-initiated or + * on CRON). Note 1: The expected format is keyspace.cfname. If no value is provided then + * compaction is scheduled for all KS,CF(s) Note 2: CF name allows special character "*" to + * denote all the columnfamilies in a given keyspace. e.g. keyspace1.* denotes all the CFs in + * keyspace1. Note 3: {@link #getCompactionExcludeCFList()} is applied first to exclude + * CF/keyspace and then {@link #getCompactionIncludeCFList()} is applied to include the + * CF's/keyspaces. + * + * @return Column Family(ies), comma delimited, to exclude from compactions. If no filter is + * applied, returns null. */ - @Deprecated - int getBackupHour(); + default String getCompactionExcludeCFList() { + return null; + } /** * Cron expression to be used for snapshot backups. * * @return Backup cron expression for snapshots - * @see quartz-scheduler + * @see quartz-scheduler * @see http://www.cronmaker.com To build new cron timer */ - String getBackupCronExpression(); + default String getBackupCronExpression() { + return "0 0 12 1/1 * ? *"; + } /** - * Backup scheduler type to use for backup. + * Column Family(ies), comma delimited, to include during snapshot backup. Note 1: The expected + * format is keyspace.cfname. If no value is provided then snapshot contains all KS,CF(s) Note + * 2: CF name allows special character "*" to denote all the columnfamilies in a given keyspace. + * e.g. keyspace1.* denotes all the CFs in keyspace1. Note 3: {@link + * #getSnapshotExcludeCFList()} is applied first to exclude CF/keyspace and then {@link + * #getSnapshotIncludeCFList()} is applied to include the CF's/keyspaces. * - * @return Type of scheduler to use for backup. Note the default is TIMER based i.e. to use {@link #getBackupHour()}. - * If value of "CRON" is provided it starts using {@link #getBackupCronExpression()}. - */ - SchedulerType getBackupSchedulerType() throws UnsupportedTypeException; - - /* - * @return key spaces, comma delimited, to filter from restore. If no filter is applied, returns null or empty string. - */ - String getSnapshotKeyspaceFilters(); - - /* - * Column Family(ies), comma delimited, to filter from backup. - * *Note: the expected format is keyspace.cfname - * - * @return Column Family(ies), comma delimited, to filter from backup. If no filter is applied, returns null. + * @return Column Family(ies), comma delimited, to include in snapshot backup. If no filter is + * applied, returns null. */ - String getSnapshotCFFilter(); - - /* - * @return key spaces, comma delimited, to filter from restore. If no filter is applied, returns null or empty string. - */ - String getIncrementalKeyspaceFilters(); - - /* - * Column Family(ies), comma delimited, to filter from backup. - * *Note: the expected format is keyspace.cfname - * - * @return Column Family(ies), comma delimited, to filter from backup. If no filter is applied, returns null. 
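Read together, notes 1-3 above describe a small matching algorithm; the helpers below are an illustrative sketch of those semantics, not Priam's actual filter implementation:

// Sketch of the keyspace.cfname filter semantics (illustrative; not Priam's parser).
static boolean matches(String filterCsv, String keyspace, String cf) {
    if (filterCsv == null || filterCsv.isEmpty()) return false;
    for (String entry : filterCsv.split(",")) {
        String[] parts = entry.trim().split("\\.", 2);
        if (parts.length == 2
                && parts[0].equals(keyspace)
                && (parts[1].equals("*") || parts[1].equals(cf))) {
            return true; // "ks.*" matches every CF in keyspace ks
        }
    }
    return false;
}

// The exclude list is applied first; a non-empty include list then narrows what remains.
static boolean isSelected(String includeCsv, String excludeCsv, String keyspace, String cf) {
    if (matches(excludeCsv, keyspace, cf)) return false;
    return includeCsv == null || includeCsv.isEmpty() || matches(includeCsv, keyspace, cf);
}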
- */ - String getIncrementalCFFilter(); - - /* - * @return key spaces, comma delimited, to filter from restore. If no filter is applied, returns null or empty string. - */ - String getRestoreKeyspaceFilter(); - - /* - * Column Family(ies), comma delimited, to filter from backup. - * Note: the expected format is keyspace.cfname - * - * @return Column Family(ies), comma delimited, to filter from restore. If no filter is applied, returns null or empty string. - */ - String getRestoreCFFilter(); + default String getSnapshotIncludeCFList() { + return null; + } /** - * Specifies the start and end time used for restoring data (yyyyMMddHHmm - * format) Eg: 201201132030,201201142030 + * Column family(ies), comma delimited, to exclude during snapshot backup. Note 1: The expected + * format is keyspace.cfname. If no value is provided then snapshot is scheduled for all + * KS,CF(s) Note 2: CF name allows special character "*" to denote all the columnfamilies in a + * given keyspace. e.g. keyspace1.* denotes all the CFs in keyspace1. Note 3: {@link + * #getSnapshotExcludeCFList()} is applied first to exclude CF/keyspace and then {@link + * #getSnapshotIncludeCFList()} is applied to include the CF's/keyspaces. * - * @return Snapshot to be searched and restored + * @return Column Family(ies), comma delimited, to exclude from snapshot backup. If no filter is + * applied, returns null. */ - String getRestoreSnapshot(); - - /** - * @return Get the region to connect to SDB for instance identity - */ - String getSDBInstanceIdentityRegion(); + default String getSnapshotExcludeCFList() { + return null; + } /** - * @return Get the Data Center name (or region for AWS) + * Column Family(ies), comma delimited, to include during incremental backup. Note 1: The + * expected format is keyspace.cfname. If no value is provided then incremental contains all + * KS,CF(s) Note 2: CF name allows special character "*" to denote all the columnfamilies in a + * given keyspace. e.g. keyspace1.* denotes all the CFs in keyspace1. Note 3: {@link + * #getIncrementalExcludeCFList()} is applied first to exclude CF/keyspace and then {@link + * #getIncrementalIncludeCFList()} is applied to include the CF's/keyspaces. + * + * @return Column Family(ies), comma delimited, to include in incremental backup. If no filter + * is applied, returns null. */ - String getDC(); + default String getIncrementalIncludeCFList() { + return null; + } /** - * @param region Set the current data center + * Column family(ies), comma delimited, to exclude during incremental backup. Note 1: The + * expected format is keyspace.cfname. If no value is provided then incremental is scheduled for + * all KS,CF(s) Note 2: CF name allows special character "*" to denote all the columnfamilies in + * a given keyspace. e.g. keyspace1.* denotes all the CFs in keyspace1. Note 3: {@link + * #getIncrementalExcludeCFList()} is applied first to exclude CF/keyspace and then {@link + * #getIncrementalIncludeCFList()} is applied to include the CF's/keyspaces. + * + * @return Column Family(ies), comma delimited, to exclude from incremental backup. If no filter + * is applied, returns null. */ - void setDC(String region); + default String getIncrementalExcludeCFList() { + return null; + } /** - * @return true if it is a multi regional cluster + * Column Family(ies), comma delimited, to include during restore. Note 1: The expected format + * is keyspace.cfname. 
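The yyyyMMddHHmm range documented above parses mechanically; a sketch using java.time.LocalDateTime and DateTimeFormatter (the helper name is illustrative):

// Illustrative parser for the "yyyyMMddHHmm,yyyyMMddHHmm" restore window.
static java.time.LocalDateTime[] parseRestoreWindow(String restoreSnapshot) {
    java.time.format.DateTimeFormatter fmt =
            java.time.format.DateTimeFormatter.ofPattern("yyyyMMddHHmm");
    String[] range = restoreSnapshot.split(",");
    return new java.time.LocalDateTime[] {
        java.time.LocalDateTime.parse(range[0].trim(), fmt), // e.g. 201201132030
        java.time.LocalDateTime.parse(range[1].trim(), fmt)  // e.g. 201201142030
    };
}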
If no value is provided then restore contains all KS,CF(s) Note 2: CF + * name allows special character "*" to denote all the columnfamilies in a given keyspace. e.g. + * keyspace1.* denotes all the CFs in keyspace1. Note 3: {@link #getRestoreExcludeCFList()} is + * applied first to exclude CF/keyspace and then {@link #getRestoreIncludeCFList()} is applied + * to include the CF's/keyspaces. + * + * @return Column Family(ies), comma delimited, to include in restore. If no filter is applied, + * returns null. */ - boolean isMultiDC(); + default String getRestoreIncludeCFList() { + return null; + } /** - * @return Number of backup threads for uploading + * Column family(ies), comma delimited, to exclude during restore. Note 1: The expected format + * is keyspace.cfname. If no value is provided then restore is scheduled for all KS,CF(s) Note + * 2: CF name allows special character "*" to denote all the columnfamilies in a given keyspace. + * e.g. keyspace1.* denotes all the CFs in keyspace1. Note 3: {@link #getRestoreExcludeCFList()} + * is applied first to exclude CF/keyspace and then {@link #getRestoreIncludeCFList()} is + * applied to include the CF's/keyspaces. + * + * @return Column Family(ies), comma delimited, to exclude from restore. If no filter is + * applied, returns null. */ - int getMaxBackupUploadThreads(); + default String getRestoreExcludeCFList() { + return null; + } /** - * @return Number of download threads + * Specifies the start and end time used for restoring data (yyyyMMddHHmm format) Eg: + * 201201132030,201201142030 + * + * @return Snapshot to be searched and restored */ - int getMaxBackupDownloadThreads(); + default String getRestoreSnapshot() { + return StringUtils.EMPTY; + } - /** - * @return true if restore should search for nearest token if current token - * is not found - */ - boolean isRestoreClosestToken(); + /** @return Get the region to connect to SDB for instance identity */ + default String getSDBInstanceIdentityRegion() { + return "us-east-1"; + } - /** - * Amazon specific setting to query ASG Membership - */ - String getASGName(); + /** @return true if it is a multi regional cluster */ + default boolean isMultiDC() { + return false; + } - /** - * Amazon specific setting to query Additional/ Sibling ASG Memberships in csv format to consider while calculating RAC membership - */ - String getSiblingASGNames(); + /** @return Number of backup threads for uploading files when using async feature */ + default int getBackupThreads() { + return 2; + } - /** - * Get the security group associated with nodes in this cluster - */ - String getACLGroupName(); + /** @return Number of download threads for downloading files when using async feature */ + default int getRestoreThreads() { + return 8; + } - /** - * @return true if incremental backups are enabled - */ - boolean isIncrBackup(); + /** @return true if restore should search for nearest token if current token is not found */ + default boolean isRestoreClosestToken() { + return false; + } /** - * @return Get host IP + * Amazon specific setting to query Additional/ Sibling ASG Memberships in csv format to + * consider while calculating RAC membership */ - String getHostIP(); + default String getSiblingASGNames() { + return ","; + } - /** - * @return Bytes per second to throttle for backups - */ - int getUploadThrottle(); + /** Get the security group associated with nodes in this cluster */ + default String getACLGroupName() { + return getAppName(); + } - /** - * @return InstanceDataRetriever which encapsulates 
meta-data about the running instance like region, RAC, name, ip address etc. - */ - InstanceDataRetriever getInstanceDataRetriever() throws InstantiationException, IllegalAccessException, ClassNotFoundException; + /** @return true if incremental backups are enabled */ + default boolean isIncrementalBackupEnabled() { + return true; + } - /** - * @return true if Priam should local config file for tokens and seeds - */ - boolean isLocalBootstrapEnabled(); + /** @return Bytes per second to throttle for backups */ + default int getUploadThrottle() { + return -1; + } /** - * @return In memory compaction limit + * Get the throttle limit for the remote file system's get-object-exist API call. Default: -1. + * Use a value of -1 to disable this throttle. + * + * @return throttle limit for get object exist API call. */ - int getInMemoryCompactionLimit(); + default int getRemoteFileSystemObjectExistsThrottle() { + return -1; + } - /** - * @return Compaction throughput - */ - int getCompactionThroughput(); + /** @return true if Priam should use a local config file for tokens and seeds */ + default boolean isLocalBootstrapEnabled() { + return false; + } - /** - * @return compaction_throughput_mb_per_sec - */ - int getMaxHintWindowInMS(); + /** @return Compaction throughput */ + default int getCompactionThroughput() { + return 8; + } - /** - * @return hinted_handoff_throttle_in_kb - */ - int getHintedHandoffThrottleKb(); + /** @return max_hint_window_in_ms in C* yaml */ + default int getMaxHintWindowInMS() { + return 10800000; + } + /** @return hinted_handoff_throttle_in_kb */ + default int getHintedHandoffThrottleKb() { + return 1024; + } - /** - * @return Size of Cassandra max direct memory - */ - String getMaxDirectMemory(); + /** @return Size of Cassandra max direct memory */ + default String getMaxDirectMemory() { + return "50G"; + } - /** - * @return Bootstrap cluster name (depends on another cass cluster) - */ - String getBootClusterName(); + /** @return Bootstrap cluster name (depends on another cass cluster) */ + default String getBootClusterName() { + return StringUtils.EMPTY; + } - /** - * @return Get the name of seed provider - */ - String getSeedProviderName(); + /** @return Get the name of seed provider */ + default String getSeedProviderName() { + return "com.netflix.priam.cassandra.extensions.NFSeedProvider"; + } /** + * memtable_cleanup_threshold defaults to 1 / (memtable_flush_writers + 1) = 0.11 + * + * @return memtable_cleanup_threshold in C* yaml */ - double getMemtableCleanupThreshold(); - - /** - * @return stream_throughput_outbound_megabits_per_sec in yaml - */ - int getStreamingThroughputMB(); + default double getMemtableCleanupThreshold() { + return 0.11; + } + /** @return stream_throughput_outbound_megabits_per_sec in yaml */ + default int getStreamingThroughputMB() { + return 400; + } /** * Get the partitioner for this cassandra cluster/node. * * @return the fully-qualified name of the partitioner class */ - String getPartitioner(); - - /** - * Support for c* 1.1 global key cache size - */ - String getKeyCacheSizeInMB(); - - /** - * Support for limiting the total number of keys in c* 1.1 global key cache. - */ - String getKeyCacheKeysToSave(); - - /** - * Support for c* 1.1 global row cache size - */ - String getRowCacheSizeInMB(); - - /** - * Support for limiting the total number of rows in c* 1.1 global row cache. - */ - String getRowCacheKeysToSave(); - - /** - * @return C* Process Name - */ - String getCassProcessName(); - - /** - * Defaults to 'allow all'.
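The 0.11 default above is just Cassandra's own formula evaluated for eight flush writers (the writer count is an assumption for illustration):

// memtable_cleanup_threshold = 1 / (memtable_flush_writers + 1)
int memtableFlushWriters = 8;                                // assumed writer count
double cleanupThreshold = 1.0 / (memtableFlushWriters + 1);  // 1/9 ~= 0.11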
- */ - String getAuthenticator(); + default String getPartitioner() { + return "org.apache.cassandra.dht.RandomPartitioner"; + } - /** - * Defaults to 'allow all'. - */ - String getAuthorizer(); + /** Support for c* 1.1 global key cache size */ + default String getKeyCacheSizeInMB() { + return StringUtils.EMPTY; + } - /** - * @return true/false, if Cassandra needs to be started manually - */ - boolean doesCassandraStartManually(); + /** Support for limiting the total number of keys in c* 1.1 global key cache. */ + default String getKeyCacheKeysToSave() { + return StringUtils.EMPTY; + } - /** - * @return possible values: all, dc, none - */ - String getInternodeCompression(); + /** Support for c* 1.1 global row cache size */ + default String getRowCacheSizeInMB() { + return StringUtils.EMPTY; + } - /** - * Enable/disable backup/restore of commit logs. - * @return boolean value true if commit log backup/restore is enabled, false otherwise. Default: false. - */ - boolean isBackingUpCommitLogs(); + /** Support for limiting the total number of rows in c* 1.1 global row cache. */ + default String getRowCacheKeysToSave() { + return StringUtils.EMPTY; + } - String getCommitLogBackupPropsFile(); + /** @return C* Process Name */ + default String getCassProcessName() { + return "CassandraDaemon"; + } - String getCommitLogBackupArchiveCmd(); + /** Defaults to 'allow all'. */ + default String getAuthenticator() { + return "org.apache.cassandra.auth.AllowAllAuthenticator"; + } - String getCommitLogBackupRestoreCmd(); + /** Defaults to 'allow all'. */ + default String getAuthorizer() { + return "org.apache.cassandra.auth.AllowAllAuthorizer"; + } - String getCommitLogBackupRestoreFromDirs(); + /** Defaults to 'CassandraRoleManager'. */ + default String getRoleManager() { + return "org.apache.cassandra.auth.CassandraRoleManager"; + } - String getCommitLogBackupRestorePointInTime(); + /** @return true/false, if Cassandra needs to be started manually */ + default boolean doesCassandraStartManually() { + return false; + } - int maxCommitLogsRestore(); + /** @return possible values: all, dc, none */ + default String getInternodeCompression() { + return "all"; + } /** - * @return true/false, if Cassandra is running in a VPC environment + * Enable/disable backup/restore of commit logs. + * + * @return boolean value true if commit log backup/restore is enabled, false otherwise. Default: + * false. 
*/ - boolean isVpcRing(); + default boolean isBackingUpCommitLogs() { + return false; + } - void setRestoreKeySpaces(List keyspaces); + default String getCommitLogBackupPropsFile() { + return getCassHome() + "/conf/commitlog_archiving.properties"; + } - boolean isClientSslEnabled(); + default String getCommitLogBackupArchiveCmd() { + return "/bin/ln %path /mnt/data/backup/%name"; + } - String getInternodeEncryption(); + default String getCommitLogBackupRestoreCmd() { + return "/bin/mv %from %to"; + } - boolean isDynamicSnitchEnabled(); + default String getCommitLogBackupRestoreFromDirs() { + return "/mnt/data/backup/commitlog/"; + } - boolean isThriftEnabled(); + default String getCommitLogBackupRestorePointInTime() { + return StringUtils.EMPTY; + } - boolean isNativeTransportEnabled(); + default int maxCommitLogsRestore() { + return 10; + } - int getConcurrentReadsCnt(); + default boolean isClientSslEnabled() { + return false; + } - int getConcurrentWritesCnt(); + default String getInternodeEncryption() { + return "none"; + } - int getConcurrentCompactorsCnt(); + default boolean isDynamicSnitchEnabled() { + return true; + } - String getRpcServerType(); + default boolean isNativeTransportEnabled() { + return true; + } - int getRpcMinThreads(); + default int getConcurrentReadsCnt() { + return 32; + } - int getRpcMaxThreads(); + default int getConcurrentWritesCnt() { + return 32; + } - int getIndexInterval(); + default int getConcurrentCompactorsCnt() { + return Runtime.getRuntime().availableProcessors(); + } /* * @return the warning threshold in MB's for large partitions encountered during compaction. * Default value of 100 is used (default from cassandra.yaml) */ - int getCompactionLargePartitionWarnThresholdInMB(); + default int getCompactionLargePartitionWarnThresholdInMB() { + return 100; + } - String getExtraConfigParams(); + default String getExtraConfigParams() { + return StringUtils.EMPTY; + } String getCassYamlVal(String priamKey); - boolean getAutoBoostrap(); - - //if using with Datastax Enterprise - String getDseClusterType(); + default boolean getAutoBoostrap() { + return true; + } - boolean isCreateNewTokenEnable(); + default boolean isCreateNewTokenEnable() { + return true; + } /* * @return the location on disk of the private key used by the cryptography algorithm */ - String getPrivateKeyLocation(); + default String getPrivateKeyLocation() { + return StringUtils.EMPTY; + } - /* - * @return the type of source for the restore. Valid values are: AWSCROSSACCT or GOOGLE. - * Note: for backward compatibility, this property should be optional. Specifically, if it does not exist, it should not cause an adverse impact on current functionality. - * - * AWSCROSSACCT - * - You are restoring from an AWS account which requires cross account assumption where an IAM user in one account is allowed to access resources that belong - * to a different account. - * - * GOOGLE - * - You are restoring from Google Cloud Storage - * - */ - String getRestoreSourceType(); + /** + * @return the type of source for the restore. Valid values are: AWSCROSSACCT or GOOGLE. Note: + * for backward compatibility, this property should be optional. Specifically, if it does + * not exist, it should not cause an adverse impact on current functionality. + *
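The archive and restore command defaults above rely on Cassandra's commitlog_archiving.properties placeholders; a sketch of the substitution they imply (the helper is illustrative):

// Illustrative %path/%name substitution for the commit log archive command template.
static String buildArchiveCommand(String template, String segmentPath, String segmentName) {
    // "/bin/ln %path /mnt/data/backup/%name" becomes "/bin/ln <segmentPath> /mnt/data/backup/<segmentName>"
    return template.replace("%path", segmentPath).replace("%name", segmentName);
}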

AWSCROSSACCT - You are restoring from an AWS account which requires cross account + * assumption where an IAM user in one account is allowed to access resources that belong to + * a different account. + *

GOOGLE - You are restoring from Google Cloud Storage + */ + default String getRestoreSourceType() { + return StringUtils.EMPTY; + } - /* - * @return true to enable encryption of backup (snapshots, incrementals, commit logs). - * Note: for backward compatibility, this property should be optional. Specifically, if it does not exist, it should not cause an adverse impact on current functionality. + /** + * Should backups be encrypted. If this is on, then all the files uploaded will be compressed + * and encrypted before being uploaded to remote file system. + * + * @return true to enable encryption of backup (snapshots, incrementals, commit logs). Note: for + * backward compatibility, this property should be optional. Specifically, if it does not + * exist, it should not cause an adverse impact on current functionality. */ - boolean isEncryptBackupEnabled(); + default boolean isEncryptBackupEnabled() { + return false; + } /** * Data that needs to be restored is encrypted? - * @return true if data that needs to be restored is encrypted. Note that setting this value does not play any role until {@link #getRestoreSnapshot()} is set to a non-null value. + * + * @return true if data that needs to be restored is encrypted. Note that setting this value + * does not play any role until {@link #getRestoreSnapshot()} is set to a non-null value. */ - boolean isRestoreEncrypted(); + default boolean isRestoreEncrypted() { + return false; + } - /* - * @return the Amazon Resource Name (ARN). This is applicable when restoring from an AWS account which requires cross account assumption. - * Note: for backward compatibility, this property should be optional. Specifically, if it does not exist, it should not cause an adverse impact on current functionality. + /** + * @return the Amazon Resource Name (ARN). This is applicable when restoring from an AWS account + * which requires cross account assumption. Note: for backward compatibility, this property + * should be optional. Specifically, if it does not exist, it should not cause an adverse + * impact on current functionality. */ - String getAWSRoleAssumptionArn(); + default String getAWSRoleAssumptionArn() { + return StringUtils.EMPTY; + } - /* + /** * @return Google Cloud Storage service account id to be use within the restore functionality. - * Note: for backward compatibility, this property should be optional. Specifically, if it does not exist, it should not cause an adverse impact on current functionality. + * Note: for backward compatibility, this property should be optional. Specifically, if it + * does not exist, it should not cause an adverse impact on current functionality. */ - String getGcsServiceAccountId(); + default String getGcsServiceAccountId() { + return StringUtils.EMPTY; + } - /* - * @return the absolute path on disk for the Google Cloud Storage PFX file (i.e. the combined format of the private key and certificate). - * This information is to be use within the restore functionality. - * Note: for backward compatibility, this property should be optional. Specifically, if it does not exist, it should not cause an adverse impact on current functionality. + /** + * @return the absolute path on disk for the Google Cloud Storage PFX file (i.e. the combined + * format of the private key and certificate). This information is to be use within the + * restore functionality. Note: for backward compatibility, this property should be + * optional. Specifically, if it does not exist, it should not cause an adverse impact on + * current functionality. 
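A restore caller can branch on this value roughly as below; this is a sketch of the documented contract, not Priam's actual wiring:

// Sketch: dispatch on the restore source type; an empty value keeps the default same-account path.
String sourceType = config.getRestoreSourceType();
if ("AWSCROSSACCT".equals(sourceType)) {
    // assume the role from getAWSRoleAssumptionArn() before reading the remote file system
} else if ("GOOGLE".equals(sourceType)) {
    // authenticate with getGcsServiceAccountId() and the key at getGcsServiceAccountPrivateKeyLoc()
}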
*/ - String getGcsServiceAccountPrivateKeyLoc(); + default String getGcsServiceAccountPrivateKeyLoc() { + return StringUtils.EMPTY; + } - /* - * @return the pass phrase use by PGP cryptography. This information is to be use within the restore and backup functionality when encryption is enabled. - * Note: for backward compatibility, this property should be optional. Specifically, if it does not exist, it should not cause an adverse impact on current functionality. + /** + * @return the pass phrase used by PGP cryptography. This information is to be used within the + * restore and backup functionality when encryption is enabled. Note: for backward + * compatibility, this property should be optional. Specifically, if it does not exist, it + * should not cause an adverse impact on current functionality. */ - String getPgpPasswordPhrase(); + default String getPgpPasswordPhrase() { + return StringUtils.EMPTY; + } - /* - * @return public key use by PGP cryptography. This information is to be use within the restore and backup functionality when encryption is enabled. - * Note: for backward compatibility, this property should be optional. Specifically, if it does not exist, it should not cause an adverse impact on current functionality. + /** + * @return public key used by PGP cryptography. This information is to be used within the restore + * and backup functionality when encryption is enabled. Note: for backward compatibility, + * this property should be optional. Specifically, if it does not exist, it should not cause + * an adverse impact on current functionality. */ - String getPgpPublicKeyLoc(); + default String getPgpPublicKeyLoc() { + return StringUtils.EMPTY; + } /** * Use this method for adding extra/ dynamic cassandra startup options or env properties * - * @return - */ - Map getExtraEnvParams(); - - /* - * @return the vpc id of the running instance. + * @return A map of extra parameters. */ - String getVpcId(); + default Map getExtraEnvParams() { + return Collections.EMPTY_MAP; + } /* - * @return the Amazon Resource Name (ARN) for EC2 classic. + * @return the Amazon Resource Name (ARN) for EC2 classic. */ - String getClassicEC2RoleAssumptionArn(); + default String getClassicEC2RoleAssumptionArn() { + return StringUtils.EMPTY; + } /* - * @return the Amazon Resource Name (ARN) for VPC. + * @return the Amazon Resource Name (ARN) for VPC. */ - String getVpcEC2RoleAssumptionArn(); + default String getVpcEC2RoleAssumptionArn() { + return StringUtils.EMPTY; + } - /* + /** + * Is the cassandra cluster spanning more than one account? This may be true if you are migrating + * your cluster from one account to another. + * * @return true if dual account support is enabled */ - boolean isDualAccount(); - - Boolean isIncrBackupParallelEnabled(); - - /* - * The number of workers for parallel uploads. - */ - int getIncrementalBkupMaxConsumers(); + default boolean isDualAccount() { + return false; + } - /* - * The max number of files queued to be uploaded. + /** + * Should incremental backup be uploaded in async fashion? If this is false, then incrementals + * will be uploaded in sync fashion. + * + * @return enable async incrementals for backup */ - int getUncrementalBkupQueueSize(); + default boolean enableAsyncIncremental() { + return false; + } /** - * @return tombstone_warn_threshold in C* yaml + * Should snapshot backup be uploaded in async fashion? If this is false, then the snapshot will be + * uploaded in sync fashion.
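With getExtraEnvParams() now carrying a default, a concrete configuration only overrides it when it has variables to inject; the keys below are ordinary cassandra-env.sh variables used for illustration (assumes java.util.Map and java.util.HashMap are imported):

// Hypothetical override feeding extra environment variables to the startup script.
@Override
public Map<String, String> getExtraEnvParams() {
    Map<String, String> env = new HashMap<>();
    env.put("MAX_HEAP_SIZE", "12G"); // read by cassandra-env.sh
    env.put("HEAP_NEWSIZE", "3G");
    return env;
}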
+ * + * @return enable async snapshot for backup */ - int getTombstoneWarnThreshold(); + default boolean enableAsyncSnapshot() { + return false; + } /** - * @return tombstone_failure_threshold in C* yaml + * Queue size to be used for backup uploads. Note that once the queue is full, we would wait for + * {@link #getUploadTimeout()} to add any new item before declining the request and throwing an + * exception. + * + * @return size of the queue for uploads. */ - int getTombstoneFailureThreshold(); + default int getBackupQueueSize() { + return 100000; + } /** - * @return streaming_socket_timeout_in_ms in C* yaml + * Queue size to be used for file downloads. Note that once the queue is full, we would wait for + * {@link #getDownloadTimeout()} to add any new item before declining the request and throwing an + * exception. + * + * @return size of the queue for downloads. */ - int getStreamingSocketTimeoutInMS(); + default int getDownloadQueueSize() { + return 100000; + } /** - * List of keyspaces to flush. Default: all keyspaces. + * Uploads are scheduled on a queue of size {@link #getBackupQueueSize()}. If the queue is full then we wait for + * {@link #getUploadTimeout()} for the queue to have an entry available for queueing the current + * task, after which we throw RejectedExecutionException. * - * @return a comma delimited list of keyspaces to flush + * @return timeout for uploads to wait on the blocking queue */ - String getFlushKeyspaces(); + default long getUploadTimeout() { + return (2 * 60 * 60 * 1000L); // 2 hours. + } /** - * Interval to be used for flush. + * Downloads are scheduled on a queue of size {@link #getDownloadQueueSize()}. If the queue is full then we wait for + * {@link #getDownloadTimeout()} for the queue to have an entry available for queueing the + * current task, after which we throw RejectedExecutionException. * - * @return the interval to run the flush task. Format is name=value where - * “name” is an enum of hour, daily, value is ... - * @deprecated Use the {{@link #getFlushCronExpression()} instead. + * @return timeout for downloads to wait on the blocking queue */ - @Deprecated - String getFlushInterval(); + default long getDownloadTimeout() { + return (10 * 60 * 60 * 1000L); // 10 hours. + } + + /** @return tombstone_warn_threshold in C* yaml */ + default int getTombstoneWarnThreshold() { + return 1000; + } + + /** @return tombstone_failure_threshold in C* yaml */ + default int getTombstoneFailureThreshold() { + return 100000; + } + + /** @return streaming_keep_alive_period in seconds in C* yaml */ + default int getStreamingKeepAlivePeriod() { + return 300; + } /** - * Scheduler type to use for flush. + * List of keyspaces to flush. Default: all keyspaces. * - * @return Type of scheduler to use for flush. Note the default is TIMER based i.e. to use {@link #getFlushInterval()}. - * If value of "CRON" is provided it starts using {@link #getFlushCronExpression()}. + * @return a comma delimited list of keyspaces to flush */ - SchedulerType getFlushSchedulerType() throws UnsupportedTypeException; + default String getFlushKeyspaces() { + return StringUtils.EMPTY; + } /** * Cron expression to be used for flush. Use "-1" to disable the CRON.
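The queue-plus-timeout contract above amounts to a timed offer; an illustrative sketch using java.util.concurrent:

// Sketch: enqueue with a timed offer, failing the way the javadoc above describes.
static <T> void enqueue(java.util.concurrent.BlockingQueue<T> queue, T task, long timeoutMillis)
        throws InterruptedException {
    if (!queue.offer(task, timeoutMillis, java.util.concurrent.TimeUnit.MILLISECONDS)) {
        throw new java.util.concurrent.RejectedExecutionException(
                "queue still full after " + timeoutMillis + " ms");
    }
}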
Default: -1 * * @return Cron expression for flush - * @see quartz-scheduler + * @see quartz-scheduler * @see http://www.cronmaker.com To build new cron timer */ - default String getFlushCronExpression(){ + default String getFlushCronExpression() { return "-1"; } + /** @return the absolute path to store the backup status on disk */ + default String getBackupStatusFileLoc() { + return getDataFileLocation() + File.separator + "backup.status"; + } + + /** @return Decides whether to use sudo to start C* or not */ + default boolean useSudo() { + return true; + } + /** - * @return the absolute path to store the backup status on disk + * This flag is an easy way to enable/disable notifications as notification topics may be + * different per region. A notification is only sent when this flag is enabled and {@link + * #getBackupNotificationTopicArn()} is not empty. + * + * @return true if backup notification is enabled, false otherwise. */ - String getBackupStatusFileLoc(); - - boolean useSudo(); + default boolean enableBackupNotification() { + return true; + } /** - * SNS Notification topic to be used for sending backup event notifications. - * One start event is sent before uploading any file and one complete/failure event is sent after the file is uploaded/failed. This applies to both incremental and snapshot. - * Default: no notifications i.e. this value is set to EMPTY VALUE + * SNS Notification topic to be used for sending backup event notifications. One start event is + * sent before uploading any file and one complete/failure event is sent after the file is + * uploaded/failed. This applies to both incremental and snapshot. Default: no notifications + * i.e. this value is set to EMPTY VALUE + * * @return SNS Topic ARN to be used to send notification. */ - String getBackupNotificationTopicArn(); + default String getBackupNotificationTopicArn() { + return StringUtils.EMPTY; + } /** - * Post restore hook enabled state. If enabled, jar represented by getPostRepairHook is called once download of files is complete, before starting Cassandra. + * Post restore hook enabled state. If enabled, jar represented by getPostRepairHook is called + * once download of files is complete, before starting Cassandra. 
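Combining the flag with the topic setting, the effective gate for sending a notification reads roughly as:

// Sketch: a notification fires only when the flag is on and a topic ARN is configured.
boolean shouldNotify =
        config.enableBackupNotification()
                && !config.getBackupNotificationTopicArn().isEmpty();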
+ * + * @return if post restore hook is enabled */ - boolean isPostRestoreHookEnabled(); + default boolean isPostRestoreHookEnabled() { + return false; + } /** * Post restore hook to be executed + * * @return post restore hook to be executed once restore is complete */ - String getPostRestoreHook(); - + default String getPostRestoreHook() { + return StringUtils.EMPTY; + } /** * HeartBeat file of post restore hook + * * @return file that indicates heartbeat of post restore hook */ - String getPostRestoreHookHeartbeatFileName(); - + default String getPostRestoreHookHeartbeatFileName() { + return "postrestorehook_heartbeat"; + } /** * Done file for post restore hook + * * @return file that indicates completion of post restore hook */ - String getPostRestoreHookDoneFileName(); + default String getPostRestoreHookDoneFileName() { + return "postrestorehook_done"; + } /** * Maximum time Priam has to wait for post restore hook sub-process to complete successfully + * * @return time out for post restore hook in days */ - int getPostRestoreHookTimeOutInDays(); + default int getPostRestoreHookTimeOutInDays() { + return 2; + } /** * Heartbeat timeout (in ms) for post restore hook + * * @return heartbeat timeout for post restore hook */ default int getPostRestoreHookHeartBeatTimeoutInMs() { @@ -798,9 +969,194 @@ default int getPostRestoreHookHeartBeatTimeoutInMs() { /** * Heartbeat check frequency (in ms) for post restore hook + * * @return heart beat check frequency for post restore hook */ default int getPostRestoreHookHeartbeatCheckFrequencyInMs() { return 120000; } + + /** + * Grace period in days for the file that 'could' be the output of a long-running compaction job. + * Note that cassandra creates the output of a compaction as non-tmp-link files (whole SSTable) + * which are still not part of the final "view" and thus not part of a snapshot. Another common + * issue is "index.db" being published "way" before other component files. Thus the index file has + * a modification time earlier than the other files. + * + *
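A monitor built on the two heartbeat knobs above only has to compare the heartbeat file's modification time against the timeout; an illustrative check using java.io.File:

// Illustrative staleness check for the post-restore hook heartbeat file.
static boolean heartbeatExpired(java.io.File heartbeatFile, long timeoutMs) {
    // presume the hook dead when the heartbeat has not been touched within the timeout
    return System.currentTimeMillis() - heartbeatFile.lastModified() > timeoutMs;
}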

This value is used to TTL the backups and to consider files which are forgotten by + * Cassandra. Default: 5 + * + * @return grace period for the compaction output forgotten files. + */ + default int getGracePeriodDaysForCompaction() { + return 5; + } + + /** + * Grace period in days for which a file is not considered forgotten by cassandra (that would be + * deleted by cassandra) as the file could be used in the read path of cassandra. Note that read + * path could imply streaming to a joining neighbor or for repair. When cassandra is done with a + * compaction, the input files to the compaction are removed from the "view" and thus not part of the + * snapshot, but these files may very well be used for streaming, repair, etc. and thus cannot be + * removed. + * + * @return grace period in days for read path forgotten files. + */ + default int getForgottenFileGracePeriodDaysForRead() { + return 3; + } + + /** + * If any forgotten file is found in Cassandra, it is usually good practice to move/delete them + * so when cassandra restarts, it does not load old data which should have been removed, else you may + * run into data resurrection issues. This behavior is fixed in 3.x. This configuration will + * allow Priam to move the forgotten files to a "lost_found" directory for the user to review at a + * later time, while ensuring that Cassandra does not resurrect data. + * + * @return true if Priam should move forgotten files to the "lost_found" directory of that CF. + */ + default boolean isForgottenFileMoveEnabled() { + return false; + } + + /** + * A method for allowing outside programs access to Priam configuration when paired with the + * Priam configuration HTTP endpoint at /v1/config/structured/all/property + * + * @param group The group of configuration options to return, currently just returns everything + * no matter what + * @return A Map representation of this configuration, or null if the method doesn't exist + */ + @SuppressWarnings("unchecked") + @JsonIgnore + default Map getStructuredConfiguration(String group) { + ObjectMapper objectMapper = new ObjectMapper(); + return objectMapper.convertValue(this, Map.class); + } + + /** + * Cron expression to be used for persisting Priam merged configuration to disk. Use "-1" to + * disable the CRON. This will persist the fully merged value of Priam's configuration to the + * {@link #getMergedConfigurationDirectory()} as two JSON files: structured.json and + * unstructured.json which persist structured config and unstructured config respectively. We + * recommend you only rely on unstructured for the time being until the structured interface is + * finalized. + * + *
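A sidecar could read the persisted output back with Jackson; the directory and file name below come from the defaults described here, and the snippet assumes jackson-databind on the classpath (throws IOException):

// Sketch: load the persisted unstructured configuration back into a map.
com.fasterxml.jackson.databind.ObjectMapper mapper =
        new com.fasterxml.jackson.databind.ObjectMapper();
java.io.File dir = new java.io.File("/tmp/priam_configuration"); // getMergedConfigurationDirectory() default
java.util.Map<String, Object> unstructured =
        mapper.readValue(new java.io.File(dir, "unstructured.json"),
                new com.fasterxml.jackson.core.type.TypeReference<java.util.Map<String, Object>>() {});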

Default: every minute + * + * @return Cron expression for merged configuration writing + * @see quartz-scheduler + * @see http://www.cronmaker.com To build new cron timer + */ + default String getMergedConfigurationCronExpression() { + // Every minute on the top of the minute. + return "0 * * * * ? *"; + } + + /** + * Returns the path to the directory that Priam should write merged configuration to. Note that + * if you disable the merged configuration cron above {@link + * #getMergedConfigurationCronExpression()} then this directory is not created or used + * + * @return A string representation of the path to the merged priam configuration directory. + */ + default String getMergedConfigurationDirectory() { + return "/tmp/priam_configuration"; + } + + /** + * Return a list of property file paths from the configuration directory by Priam that should be + * tuned. + * + * @return the files paths + */ + default ImmutableSet getTunablePropertyFiles() { + return ImmutableSet.of(); + } + + /** + * @return true to use private IPs for seeds and insertion into the Token DB false otherwise. + */ + default boolean usePrivateIP() { + return getSnitch().equals("org.apache.cassandra.locator.GossipingPropertyFileSnitch"); + } + + /** + * @return BackupsToCompress UNCOMPRESSED means compress backups only when the files are not + * already compressed by Cassandra + */ + default BackupsToCompress getBackupsToCompress() { + return BackupsToCompress.ALL; + } + + /** @return the way how data files are accessed. Default value is auto. */ + default String getDiskAccessMode() { + return "auto"; + } + + /** + * @return true if Priam should skip deleting ingress rules for IPs not found in the token + * database. + */ + default boolean skipDeletingOthersIngressRules() { + return false; + } + + /** + * @return true if Priam should skip updating ingress rules for ips found in the token database. + */ + default boolean skipUpdatingOthersIngressRules() { + return false; + } + + /** + * @return true if Priam should skip ingress on an IP address from the token database unless it + * can confirm that it is public + */ + default boolean skipIngressUnlessIPIsPublic() { + return false; + } + + default boolean permitDirectTokenAssignmentWithGossipMismatch() { + return false; + } + + /** returns how long a snapshot backup should take to upload in minutes */ + default int getTargetMinutesToCompleteSnaphotUpload() { + return 0; + } + + /** + * @return the percentage off of the old rate that the current rate must be to trigger a new + * rate in the dynamic rate limiter + */ + default double getRateLimitChangeThreshold() { + return 0.1; + } + + default boolean addMD5ToBackupUploads() { + return false; + } + + /** + * If a backup file's last-modified time is before this time, revert to SNAPPY compression. + * Otherwise, choose compression using the default logic based on getBackupsToCompress(). + * + * @return the milliseconds since the epoch of the transition time. + */ + default long getCompressionTransitionEpochMillis() { + return 0L; + } + + /** + * Escape hatch for getting any arbitrary property by key This is useful so we don't have to + * keep adding methods to this interface for every single configuration option ever. Also + * exposed via HTTP at v1/config/unstructured/X + * + * @param key The arbitrary configuration property to look up + * @param defaultValue The default value to return if the key is not found. 
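The rate limiter threshold above is a relative-change test; an illustrative reading (assumes a positive old rate):

// Sketch: adopt a new rate only when it moves more than the threshold (default 0.1 = 10%) from the old one.
static boolean shouldChangeRate(double oldRate, double newRate, double changeThreshold) {
    return Math.abs(newRate - oldRate) / oldRate > changeThreshold; // oldRate assumed > 0
}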
+ * @return The result for the property, or the defaultValue if provided (null otherwise) + */ + String getProperty(String key, String defaultValue); } diff --git a/priam/src/main/java/com/netflix/priam/config/PriamConfiguration.java b/priam/src/main/java/com/netflix/priam/config/PriamConfiguration.java index 140943db4..ebde99fe1 100644 --- a/priam/src/main/java/com/netflix/priam/config/PriamConfiguration.java +++ b/priam/src/main/java/com/netflix/priam/config/PriamConfiguration.java @@ -16,391 +16,49 @@ */ package com.netflix.priam.config; -import com.amazonaws.services.ec2.AmazonEC2; -import com.amazonaws.services.ec2.AmazonEC2Client; -import com.amazonaws.services.ec2.model.*; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.Lists; +import com.fasterxml.jackson.annotation.JsonIgnore; import com.google.inject.Inject; import com.google.inject.Singleton; import com.netflix.priam.configSource.IConfigSource; -import com.netflix.priam.cred.ICredential; -import com.netflix.priam.identity.InstanceEnvIdentity; -import com.netflix.priam.identity.config.InstanceDataRetriever; -import com.netflix.priam.scheduler.SchedulerType; +import com.netflix.priam.identity.config.InstanceInfo; import com.netflix.priam.scheduler.UnsupportedTypeException; import com.netflix.priam.tuner.GCType; -import com.netflix.priam.tuner.JVMOption; -import com.netflix.priam.tuner.JVMOptionsTuner; -import com.netflix.priam.utils.RetryableCallable; -import com.netflix.priam.utils.SystemUtils; -import org.apache.commons.lang3.StringUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.io.File; import java.util.HashMap; import java.util.List; import java.util.Map; +import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; @Singleton public class PriamConfiguration implements IConfiguration { public static final String PRIAM_PRE = "priam"; - private static final String CONFIG_CASS_HOME_DIR = PRIAM_PRE + ".cass.home"; - private static final String CONFIG_CASS_START_SCRIPT = PRIAM_PRE + ".cass.startscript"; - private static final String CONFIG_CASS_STOP_SCRIPT = PRIAM_PRE + ".cass.stopscript"; - private static final String CONFIG_CASS_USE_SUDO = PRIAM_PRE + ".cass.usesudo"; - private static final String CONFIG_CLUSTER_NAME = PRIAM_PRE + ".clustername"; - private static final String CONFIG_SEED_PROVIDER_NAME = PRIAM_PRE + ".seed.provider"; - private static final String CONFIG_LOAD_LOCAL_PROPERTIES = PRIAM_PRE + ".localbootstrap.enable"; - private static final String CONFIG_MAX_HEAP_SIZE = PRIAM_PRE + ".heap.size."; - private static final String CONFIG_DATA_LOCATION = PRIAM_PRE + ".data.location"; - private static final String CONFIG_LOGS_LOCATION = PRIAM_PRE + ".logs.location"; - private static final String CONFIG_MR_ENABLE = PRIAM_PRE + ".multiregion.enable"; - private static final String CONFIG_CL_LOCATION = PRIAM_PRE + ".commitlog.location"; - private static final String CONFIG_JMX_LISTERN_PORT_NAME = PRIAM_PRE + ".jmx.port"; - private static final String CONFIG_JMX_USERNAME = PRIAM_PRE + ".jmx.username"; - private static final String CONFIG_JMX_PASSWORD = PRIAM_PRE + ".jmx.password"; - private static final String CONFIG_JMX_ENABLE_REMOTE = PRIAM_PRE + ".jmx.remote.enable"; - private static final String CONFIG_AVAILABILITY_ZONES = PRIAM_PRE + ".zones.available"; - private static final String CONFIG_SAVE_CACHE_LOCATION = PRIAM_PRE + ".cache.location"; - private static final String CONFIG_NEW_MAX_HEAP_SIZE = PRIAM_PRE + 
".heap.newgen.size."; - private static final String CONFIG_DIRECT_MAX_HEAP_SIZE = PRIAM_PRE + ".direct.memory.size."; - private static final String CONFIG_THRIFT_LISTEN_PORT_NAME = PRIAM_PRE + ".thrift.port"; - private static final String CONFIG_THRIFT_ENABLED = PRIAM_PRE + ".thrift.enabled"; - private static final String CONFIG_NATIVE_PROTOCOL_PORT = PRIAM_PRE + ".nativeTransport.port"; - private static final String CONFIG_NATIVE_PROTOCOL_ENABLED = PRIAM_PRE + ".nativeTransport.enabled"; - private static final String CONFIG_STORAGE_LISTERN_PORT_NAME = PRIAM_PRE + ".storage.port"; - private static final String CONFIG_SSL_STORAGE_LISTERN_PORT_NAME = PRIAM_PRE + ".ssl.storage.port"; - private static final String CONFIG_CL_BK_LOCATION = PRIAM_PRE + ".backup.commitlog.location"; - private static final String CONFIG_THROTTLE_UPLOAD_PER_SECOND = PRIAM_PRE + ".upload.throttle"; - private static final String CONFIG_IN_MEMORY_COMPACTION_LIMIT = PRIAM_PRE + ".memory.compaction.limit"; - private static final String CONFIG_COMPACTION_THROUHPUT = PRIAM_PRE + ".compaction.throughput"; - private static final String CONFIG_MAX_HINT_WINDOW_IN_MS = PRIAM_PRE + ".hint.window"; - private static final String CONFIG_HINT_DELAY = PRIAM_PRE + ".hint.delay"; - private static final String CONFIG_BOOTCLUSTER_NAME = PRIAM_PRE + ".bootcluster"; - private static final String CONFIG_ENDPOINT_SNITCH = PRIAM_PRE + ".endpoint_snitch"; - private static final String CONFIG_MEMTABLE_TOTAL_SPACE = PRIAM_PRE + ".memtabletotalspace"; - private static final String CONFIG_MEMTABLE_CLEANUP_THRESHOLD = PRIAM_PRE + ".memtable.cleanup.threshold"; - private static final String CONFIG_CASS_PROCESS_NAME = PRIAM_PRE + ".cass.process"; - private static final String CONFIG_VNODE_NUM_TOKENS = PRIAM_PRE + ".vnodes.numTokens"; - private static final String CONFIG_YAML_LOCATION = PRIAM_PRE + ".yamlLocation"; - private static final String CONFIG_AUTHENTICATOR = PRIAM_PRE + ".authenticator"; - private static final String CONFIG_AUTHORIZER = PRIAM_PRE + ".authorizer"; - private static final String CONFIG_TARGET_KEYSPACE_NAME = PRIAM_PRE + ".target.keyspace"; - private static final String CONFIG_TARGET_COLUMN_FAMILY_NAME = PRIAM_PRE + ".target.columnfamily"; - private static final String CONFIG_CASS_MANUAL_START_ENABLE = PRIAM_PRE + ".cass.manual.start.enable"; - private static final String CONFIG_REMEDIATE_DEAD_CASSANDRA_RATE_S = PRIAM_PRE + ".remediate.dead.cassandra.rate"; - private static final String CONFIG_CREATE_NEW_TOKEN_ENABLE = PRIAM_PRE + ".create.new.token.enable"; - - // Backup and Restore - private static final String CONFIG_BACKUP_THREADS = PRIAM_PRE + ".backup.threads"; - private static final String CONFIG_RESTORE_PREFIX = PRIAM_PRE + ".restore.prefix"; - private static final String CONFIG_INCR_BK_ENABLE = PRIAM_PRE + ".backup.incremental.enable"; - private static final String CONFIG_SNAPSHOT_KEYSPACE_FILTER = PRIAM_PRE + ".snapshot.keyspace.filter"; - private static final String CONFIG_SNAPSHOT_CF_FILTER = PRIAM_PRE + ".snapshot.cf.filter"; - private static final String CONFIG_INCREMENTAL_KEYSPACE_FILTER = PRIAM_PRE + ".incremental.keyspace.filter"; - private static final String CONFIG_INCREMENTAL_CF_FILTER = PRIAM_PRE + ".incremental.cf.filter"; - private static final String CONFIG_RESTORE_KEYSPACE_FILTER = PRIAM_PRE + ".restore.keyspace.filter"; - private static final String CONFIG_RESTORE_CF_FILTER = PRIAM_PRE + ".restore.cf.filter"; - - private static final String CONFIG_CL_BK_ENABLE = PRIAM_PRE + ".backup.commitlog.enable"; - 
private static final String CONFIG_AUTO_RESTORE_SNAPSHOTNAME = PRIAM_PRE + ".restore.snapshot"; - private static final String CONFIG_BUCKET_NAME = PRIAM_PRE + ".s3.bucket"; - private static final String CONFIG_BACKUP_SCHEDULE_TYPE = PRIAM_PRE + ".backup.schedule.type"; - private static final String CONFIG_BACKUP_HOUR = PRIAM_PRE + ".backup.hour"; - private static final String CONFIG_BACKUP_CRON_EXPRESSION = PRIAM_PRE + ".backup.cron"; - private static final String CONFIG_S3_BASE_DIR = PRIAM_PRE + ".s3.base_dir"; - private static final String CONFIG_RESTORE_THREADS = PRIAM_PRE + ".restore.threads"; - private static final String CONFIG_RESTORE_CLOSEST_TOKEN = PRIAM_PRE + ".restore.closesttoken"; - private static final String CONFIG_RESTORE_KEYSPACES = PRIAM_PRE + ".restore.keyspaces"; - private static final String CONFIG_BACKUP_CHUNK_SIZE = PRIAM_PRE + ".backup.chunksizemb"; - private static final String CONFIG_BACKUP_RETENTION = PRIAM_PRE + ".backup.retention"; - private static final String CONFIG_BACKUP_RACS = PRIAM_PRE + ".backup.racs"; - private static final String CONFIG_BACKUP_STATUS_FILE_LOCATION = PRIAM_PRE + ".backup.status.location"; - private static final String CONFIG_MULTITHREADED_COMPACTION = PRIAM_PRE + ".multithreaded.compaction"; - private static final String CONFIG_STREAMING_THROUGHPUT_MB = PRIAM_PRE + ".streaming.throughput.mb"; - private static final String CONFIG_STREAMING_SOCKET_TIMEOUT_IN_MS = PRIAM_PRE + ".streaming.socket.timeout.ms"; - private static final String CONFIG_TOMBSTONE_FAILURE_THRESHOLD = PRIAM_PRE + ".tombstone.failure.threshold"; - private static final String CONFIG_TOMBSTONE_WARNING_THRESHOLD = PRIAM_PRE + ".tombstone.warning.threshold"; - - private static final String CONFIG_PARTITIONER = PRIAM_PRE + ".partitioner"; - private static final String CONFIG_KEYCACHE_SIZE = PRIAM_PRE + ".keyCache.size"; - private static final String CONFIG_KEYCACHE_COUNT = PRIAM_PRE + ".keyCache.count"; - private static final String CONFIG_ROWCACHE_SIZE = PRIAM_PRE + ".rowCache.size"; - private static final String CONFIG_ROWCACHE_COUNT = PRIAM_PRE + ".rowCache.count"; - - private static final String CONFIG_MAX_HINT_THREADS = PRIAM_PRE + ".hints.maxThreads"; - private static final String CONFIG_HINTS_THROTTLE_KB = PRIAM_PRE + ".hints.throttleKb"; - private static final String CONFIG_INTERNODE_COMPRESSION = PRIAM_PRE + ".internodeCompression"; - - private static final String CONFIG_COMMITLOG_BKUP_ENABLED = PRIAM_PRE + ".clbackup.enabled"; - private static final String CONFIG_COMMITLOG_PROPS_FILE = PRIAM_PRE + ".clbackup.propsfile"; - private static final String CONFIG_COMMITLOG_ARCHIVE_CMD = PRIAM_PRE + ".clbackup.archiveCmd"; - private static final String CONFIG_COMMITLOG_RESTORE_CMD = PRIAM_PRE + ".clbackup.restoreCmd"; - private static final String CONFIG_COMMITLOG_RESTORE_DIRS = PRIAM_PRE + ".clbackup.restoreDirs"; - private static final String CONFIG_COMMITLOG_RESTORE_POINT_IN_TIME = PRIAM_PRE + ".clbackup.restoreTime"; - private static final String CONFIG_COMMITLOG_RESTORE_MAX = PRIAM_PRE + ".clrestore.max"; - private static final String CONFIG_CLIENT_SSL_ENABLED = PRIAM_PRE + ".client.sslEnabled"; - private static final String CONFIG_INTERNODE_ENCRYPTION = PRIAM_PRE + ".internodeEncryption"; - private static final String CONFIG_DSNITCH_ENABLED = PRIAM_PRE + ".dsnitchEnabled"; - - private static final String CONFIG_CONCURRENT_READS = PRIAM_PRE + ".concurrentReads"; - private static final String CONFIG_CONCURRENT_WRITES = PRIAM_PRE + ".concurrentWrites"; - private static 
final String CONFIG_CONCURRENT_COMPACTORS = PRIAM_PRE + ".concurrentCompactors"; - - private static final String CONFIG_RPC_SERVER_TYPE = PRIAM_PRE + ".rpc.server.type"; - private static final String CONFIG_RPC_MIN_THREADS = PRIAM_PRE + ".rpc.min.threads"; - private static final String CONFIG_RPC_MAX_THREADS = PRIAM_PRE + ".rpc.max.threads"; - private static final String CONFIG_INDEX_INTERVAL = PRIAM_PRE + ".index.interval"; - private static final String CONFIG_EXTRA_PARAMS = PRIAM_PRE + ".extra.params"; - private static final String CONFIG_AUTO_BOOTSTRAP = PRIAM_PRE + ".auto.bootstrap"; - private static final String CONFIG_DSE_CLUSTER_TYPE = PRIAM_PRE + ".dse.cluster.type"; - private static final String CONFIG_EXTRA_ENV_PARAMS = PRIAM_PRE + ".extra.env.params"; - - private static final String CONFIG_RESTORE_SOURCE_TYPE = PRIAM_PRE + ".restore.source.type"; //the type of source for the restore. Valid values are: AWSCROSSACCT or GOOGLE. - private static final String CONFIG_ENCRYPTED_BACKUP_ENABLED = PRIAM_PRE + ".encrypted.backup.enabled"; //enable encryption of backup (snapshots, incrementals, commit logs). - - //Backup and restore cryptography - private static final String CONFIG_PRIKEY_LOC = PRIAM_PRE + ".private.key.location"; //the location on disk of the private key used by the cryptography algorithm - private static final String CONFIG_PGP_PASSWORD_PHRASE = PRIAM_PRE + ".pgp.password.phrase"; //pass phrase used by the cryptography algorithm - private static final String CONFIG_PGP_PUB_KEY_LOC = PRIAM_PRE + ".pgp.pubkey.file.location"; - - //Restore from Google Cloud Storage - private static final String CONFIG_GCS_SERVICE_ACCT_ID = PRIAM_PRE + ".gcs.service.acct.id"; //Google Cloud Storage service account id - private static final String CONFIG_GCS_SERVICE_ACCT_PRIVATE_KEY_LOC = PRIAM_PRE + ".gcs.service.acct.private.key"; //the absolute path on disk for the Google Cloud Storage PFX file (i.e. the combined format of the private key and certificate). - - // Amazon specific - private static final String CONFIG_ASG_NAME = PRIAM_PRE + ".az.asgname"; - private static final String CONFIG_SIBLING_ASG_NAMES = PRIAM_PRE + ".az.sibling.asgnames"; - private static final String CONFIG_REGION_NAME = PRIAM_PRE + ".az.region"; - private static final String SDB_INSTANCE_INDENTITY_REGION_NAME = PRIAM_PRE + ".sdb.instanceIdentity.region"; - private static final String CONFIG_ACL_GROUP_NAME = PRIAM_PRE + ".acl.groupname"; - private static String ASG_NAME = System.getenv("ASG_NAME"); - private static String REGION = System.getenv("EC2_REGION"); - private static final String CONFIG_VPC_RING = PRIAM_PRE + ".vpc"; - private static final String CONFIG_S3_ROLE_ASSUMPTION_ARN = PRIAM_PRE + ".roleassumption.arn"; //Restore from AWS. This is applicable when restoring from an AWS account which requires cross account assumption. 
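For context on the environment plumbing being deleted in this file: ASG_NAME and REGION start from environment variables here, and the removed setupEnvVars() (a few hunks below) falls back to a system property and finally infers the region from the availability-zone name. A compact, self-contained sketch of that fallback order (method and variable names are illustrative, not Priam's):

```java
public class RegionFallbackSketch {
    /**
     * Mirrors the deleted setupEnvVars() logic: prefer the EC2_REGION environment
     * variable, then the EC2_REGION system property, then derive the region from
     * the availability zone (RAC) by dropping its trailing letter.
     */
    static String resolveRegion(String envRegion, String sysPropRegion, String rac) {
        if (envRegion != null && !envRegion.isEmpty()) return envRegion;
        if (sysPropRegion != null && !sysPropRegion.isEmpty()) return sysPropRegion;
        return rac.substring(0, rac.length() - 1); // "us-east-1a" -> "us-east-1"
    }

    public static void main(String[] args) {
        System.out.println(resolveRegion(null, null, "us-east-1a"));      // us-east-1
        System.out.println(resolveRegion("eu-west-1", null, "us-east-1a")); // eu-west-1
    }
}
```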
- private static final String CONFIG_EC2_ROLE_ASSUMPTION_ARN = PRIAM_PRE + ".ec2.roleassumption.arn"; - private static final String CONFIG_VPC_ROLE_ASSUMPTION_ARN = PRIAM_PRE + ".vpc.roleassumption.arn"; - private static final String CONFIG_DUAL_ACCOUNT = PRIAM_PRE + ".roleassumption.dualaccount"; - - //Post Restore Hook - private static final String CONFIG_POST_RESTORE_HOOK_ENABLED = PRIAM_PRE + ".postrestorehook.enabled"; - private static final String CONFIG_POST_RESTORE_HOOK = PRIAM_PRE + ".postrestorehook"; - private static final String CONFIG_POST_RESTORE_HOOK_HEARTBEAT_FILENAME = PRIAM_PRE + ".postrestorehook.heartbeat.filename"; - private static final String CONFIG_POST_RESTORE_HOOK_DONE_FILENAME = PRIAM_PRE + ".postrestorehook.done.filename"; - private static final String CONFIG_POST_RESTORE_HOOK_TIMEOUT_IN_DAYS = PRIAM_PRE + ".postrestorehook.timeout.in.days"; - private static final String CONFIG_POST_RESTORE_HOOK_HEARTBEAT_TIMEOUT_MS = PRIAM_PRE + ".postrestorehook.heartbeat.timeout"; - private static final String CONFIG_POST_RESTORE_HOOK_HEARTBEAT_CHECK_FREQUENCY_MS = PRIAM_PRE + ".postrestorehook.heartbeat.check.frequency"; - - //Running instance meta data - private String RAC; - - //== vpc specific - private String NETWORK_VPC; //Fetch the vpc id of running instance - - // Defaults - private final String DEFAULT_CLUSTER_NAME = "cass_cluster"; - private final String CASS_BASE_DATA_DIR = "/var/lib/cassandra"; - private final String DEFAULT_DATA_LOCATION = CASS_BASE_DATA_DIR + "/data"; - private final String DEFAULT_LOGS_LOCATION = CASS_BASE_DATA_DIR +"/logs"; - private final String DEFAULT_COMMIT_LOG_LOCATION = "/var/lib/cassandra/commitlog"; - private final String DEFAULT_CACHE_LOCATION = "/var/lib/cassandra/saved_caches"; - private final String DEFAULT_HINTS_DIR_LOCATION = "/var/lib/cassandra/hints"; - private final String DEFAULT_ENDPOINT_SNITCH = "org.apache.cassandra.locator.Ec2Snitch"; - private final String DEFAULT_SEED_PROVIDER = "com.netflix.priam.cassandra.extensions.NFSeedProvider"; - private final String DEFAULT_PARTITIONER = "org.apache.cassandra.dht.RandomPartitioner"; - public static final String DEFAULT_AUTHENTICATOR = "org.apache.cassandra.auth.AllowAllAuthenticator"; - public static final String DEFAULT_AUTHORIZER = "org.apache.cassandra.auth.AllowAllAuthorizer"; - public static final String DEFAULT_COMMITLOG_PROPS_FILE = "/conf/commitlog_archiving.properties"; - - // rpm based. Can be modified for tar based. - private final String DEFAULT_CASS_HOME_DIR = "/etc/cassandra"; - private final String DEFAULT_CASS_START_SCRIPT = "/etc/init.d/cassandra start"; - private final String DEFAULT_CASS_STOP_SCRIPT = "/etc/init.d/cassandra stop"; - private final String DEFAULT_BACKUP_LOCATION = "backup"; - private final String DEFAULT_BUCKET_NAME = "cassandra-archive"; - // private String DEFAULT_AVAILABILITY_ZONES = ""; - private List DEFAULT_AVAILABILITY_ZONES = ImmutableList.of(); - private final String DEFAULT_CASS_PROCESS_NAME = "CassandraDaemon"; - - private final String DEFAULT_MAX_DIRECT_MEM = "50G"; - private final String DEFAULT_MAX_HEAP = "8G"; - private final String DEFAULT_MAX_NEWGEN_HEAP = "2G"; - private final int DEFAULT_JMX_PORT = 7199; - private final int DEFAULT_THRIFT_PORT = 9160; - private final int DEFAULT_NATIVE_PROTOCOL_PORT = 9042; - private final int DEFAULT_STORAGE_PORT = 7000; - private final int DEFAULT_SSL_STORAGE_PORT = 7001; - private final int DEFAULT_BACKUP_HOUR = 12; - private final String DEFAULT_BACKUP_CRON_EXPRESSION = "0 0 12 1/1 * ? 
*"; //Backup daily at 12. - private final int DEFAULT_BACKUP_THREADS = 2; - private final int DEFAULT_RESTORE_THREADS = 8; - private final int DEFAULT_BACKUP_CHUNK_SIZE = 10; - private final int DEFAULT_BACKUP_RETENTION = 0; - private final int DEFAULT_VNODE_NUM_TOKENS = 1; - private final int DEFAULT_HINTS_MAX_THREADS = 2; //default value from 1.2 yaml - private final int DEFAULT_HINTS_THROTTLE_KB = 1024; //default value from 1.2 yaml - private final String DEFAULT_INTERNODE_COMPRESSION = "all"; //default value from 1.2 yaml - // Default to restarting Cassandra automatically once per hour. - private final int DEFAULT_REMEDIATE_DEAD_CASSANDRA_RATE_S = 60 * 60; - - private static final String DEFAULT_RPC_SERVER_TYPE = "hsha"; - private static final int DEFAULT_RPC_MIN_THREADS = 16; - private static final int DEFAULT_RPC_MAX_THREADS = 2048; - private static final int DEFAULT_INDEX_INTERVAL = 256; - private static final int DEFAULT_STREAMING_SOCKET_TIMEOUT_IN_MS = 86400000; // 24 Hours - private static final int DEFAULT_TOMBSTONE_WARNING_THRESHOLD = 1000; // C* defaults - private static final int DEFAULT_TOMBSTONE_FAILURE_THRESHOLD = 100000;// C* defaults - - // AWS EC2 Dual Account - private static final boolean DEFAULT_DUAL_ACCOUNT = false; - private final IConfigSource config; private static final Logger logger = LoggerFactory.getLogger(PriamConfiguration.class); - private final ICredential provider; - private InstanceEnvIdentity insEnvIdentity; - private InstanceDataRetriever instanceDataRetriever; + @JsonIgnore private InstanceInfo instanceInfo; @Inject - public PriamConfiguration(ICredential provider, IConfigSource config, InstanceEnvIdentity insEnvIdentity) { - this.provider = provider; + public PriamConfiguration(IConfigSource config, InstanceInfo instanceInfo) { this.config = config; - this.insEnvIdentity = insEnvIdentity; + this.instanceInfo = instanceInfo; } @Override - public void intialize() { - try { - if (this.insEnvIdentity.isClassic()) { - this.instanceDataRetriever = (InstanceDataRetriever) Class.forName("com.netflix.priam.identity.config.AwsClassicInstanceDataRetriever").newInstance(); - - } else if (this.insEnvIdentity.isNonDefaultVpc()) { - this.instanceDataRetriever = (InstanceDataRetriever) Class.forName("com.netflix.priam.identity.config.AWSVpcInstanceDataRetriever").newInstance(); - } else { - throw new IllegalStateException("Unable to determine environemt (vpc, classic) for running instance."); - } - } catch (Exception e) { - throw new IllegalStateException("Exception when instantiating the instance data retriever. Msg: " + e.getLocalizedMessage()); - } - - RAC = instanceDataRetriever.getRac(); - - NETWORK_VPC = instanceDataRetriever.getVpcId(); - - setupEnvVars(); - this.config.intialize(ASG_NAME, REGION); - setDefaultRACList(REGION); - populateProps(); - SystemUtils.createDirs(getBackupCommitLogLocation()); - SystemUtils.createDirs(getCommitLogLocation()); - SystemUtils.createDirs(getCacheLocation()); - SystemUtils.createDirs(getDataFileLocation()); - SystemUtils.createDirs(getHintsLocation()); - SystemUtils.createDirs(getLogDirLocation()); - } - - public InstanceDataRetriever getInstanceDataRetriever() { - return instanceDataRetriever; - } - - private void setupEnvVars() { - // Search in java opt properties - REGION = StringUtils.isBlank(REGION) ? System.getProperty("EC2_REGION") : REGION; - // Infer from zone - if (StringUtils.isBlank(REGION)) - REGION = RAC.substring(0, RAC.length() - 1); - ASG_NAME = StringUtils.isBlank(ASG_NAME) ? 
System.getProperty("ASG_NAME") : ASG_NAME; - if (StringUtils.isBlank(ASG_NAME)) - ASG_NAME = populateASGName(REGION, getInstanceDataRetriever().getInstanceId()); - logger.info("REGION set to {}, ASG Name set to {}", REGION, ASG_NAME); - } - - /** - * Query amazon to get ASG name. Currently not available as part of instance - * info api. - */ - private String populateASGName(String region, String instanceId) { - GetASGName getASGName = new GetASGName(region, instanceId); - - try { - return getASGName.call(); - } catch (Exception e) { - logger.error("Failed to determine ASG name.", e); - return null; - } - } - - private class GetASGName extends RetryableCallable { - private static final int NUMBER_OF_RETRIES = 15; - private static final long WAIT_TIME = 30000; - private final String region; - private final String instanceId; - private final AmazonEC2 client; - - public GetASGName(String region, String instanceId) { - super(NUMBER_OF_RETRIES, WAIT_TIME); - this.region = region; - this.instanceId = instanceId; - client = new AmazonEC2Client(provider.getAwsCredentialProvider()); - client.setEndpoint("ec2." + region + ".amazonaws.com"); - } - - @Override - public String retriableCall() throws IllegalStateException { - DescribeInstancesRequest desc = new DescribeInstancesRequest().withInstanceIds(instanceId); - DescribeInstancesResult res = client.describeInstances(desc); - - for (Reservation resr : res.getReservations()) { - for (Instance ins : resr.getInstances()) { - for (com.amazonaws.services.ec2.model.Tag tag : ins.getTags()) { - if (tag.getKey().equals("aws:autoscaling:groupName")) - return tag.getValue(); - } - } - } - - logger.warn("Couldn't determine ASG name"); - throw new IllegalStateException("Couldn't determine ASG name"); - } - } - - /** - * Get the fist 3 available zones in the region - */ - public void setDefaultRACList(String region) { - AmazonEC2 client = new AmazonEC2Client(provider.getAwsCredentialProvider()); - client.setEndpoint("ec2." 
+ region + ".amazonaws.com"); - DescribeAvailabilityZonesResult res = client.describeAvailabilityZones(); - List zone = Lists.newArrayList(); - for (AvailabilityZone reg : res.getAvailabilityZones()) { - if (reg.getState().equals("available")) - zone.add(reg.getZoneName()); - if (zone.size() == 3) - break; - } - DEFAULT_AVAILABILITY_ZONES = ImmutableList.copyOf(zone); - } - - private void populateProps() { - config.set(CONFIG_ASG_NAME, ASG_NAME); - config.set(CONFIG_REGION_NAME, REGION); - } - - public String getInstanceName(){ - return instanceDataRetriever.getInstanceId(); + public void initialize() { + this.config.initialize(instanceInfo.getAutoScalingGroup(), instanceInfo.getRegion()); } @Override public String getCassStartupScript() { - return config.get(CONFIG_CASS_START_SCRIPT, DEFAULT_CASS_START_SCRIPT); + return config.get(PRIAM_PRE + ".cass.startscript", "/etc/init.d/cassandra start"); } @Override public String getCassStopScript() { - return config.get(CONFIG_CASS_STOP_SCRIPT, DEFAULT_CASS_STOP_SCRIPT); + return config.get(PRIAM_PRE + ".cass.stopscript", "/etc/init.d/cassandra stop"); } @Override @@ -410,200 +68,164 @@ public int getGracefulDrainHealthWaitSeconds() { @Override public int getRemediateDeadCassandraRate() { - return config.get(CONFIG_REMEDIATE_DEAD_CASSANDRA_RATE_S, DEFAULT_REMEDIATE_DEAD_CASSANDRA_RATE_S); + return config.get( + PRIAM_PRE + ".remediate.dead.cassandra.rate", 3600); // Default to once per hour } @Override public String getCassHome() { - return config.get(CONFIG_CASS_HOME_DIR, DEFAULT_CASS_HOME_DIR); + return config.get(PRIAM_PRE + ".cass.home", "/etc/cassandra"); } @Override public String getBackupLocation() { - return config.get(CONFIG_S3_BASE_DIR, DEFAULT_BACKUP_LOCATION); + return config.get(PRIAM_PRE + ".s3.base_dir", "backup"); } @Override public String getBackupPrefix() { - return config.get(CONFIG_BUCKET_NAME, DEFAULT_BUCKET_NAME); + return config.get(PRIAM_PRE + ".s3.bucket", "cassandra-archive"); } @Override public int getBackupRetentionDays() { - return config.get(CONFIG_BACKUP_RETENTION, DEFAULT_BACKUP_RETENTION); + return config.get(PRIAM_PRE + ".backup.retention", 0); } @Override public List getBackupRacs() { - return config.getList(CONFIG_BACKUP_RACS); + return config.getList(PRIAM_PRE + ".backup.racs"); } @Override public String getRestorePrefix() { - return config.get(CONFIG_RESTORE_PREFIX); - } - - @Override - public List getRestoreKeySpaces() { - return config.getList(CONFIG_RESTORE_KEYSPACES); + return config.get(PRIAM_PRE + ".restore.prefix"); } @Override public String getDataFileLocation() { - return config.get(CONFIG_DATA_LOCATION, DEFAULT_DATA_LOCATION); + return config.get(PRIAM_PRE + ".data.location", getCassandraBaseDirectory() + "/data"); } @Override public String getLogDirLocation() { - return config.get(CONFIG_LOGS_LOCATION, DEFAULT_LOGS_LOCATION); + return config.get(PRIAM_PRE + ".logs.location", getCassandraBaseDirectory() + "/logs"); } @Override public String getHintsLocation() { - return config.get(PRIAM_PRE + ".hints.location", DEFAULT_HINTS_DIR_LOCATION); + return config.get(PRIAM_PRE + ".hints.location", getCassandraBaseDirectory() + "/hints"); } @Override - public String getCacheLocation() - { - return config.get(CONFIG_SAVE_CACHE_LOCATION, DEFAULT_CACHE_LOCATION); + public String getCacheLocation() { + return config.get( + PRIAM_PRE + ".cache.location", getCassandraBaseDirectory() + "/saved_caches"); } @Override public String getCommitLogLocation() { - return config.get(CONFIG_CL_LOCATION, 
DEFAULT_COMMIT_LOG_LOCATION); + return config.get( + PRIAM_PRE + ".commitlog.location", getCassandraBaseDirectory() + "/commitlog"); } @Override public String getBackupCommitLogLocation() { - return config.get(CONFIG_CL_BK_LOCATION, ""); + return config.get(PRIAM_PRE + ".backup.commitlog.location", ""); } @Override public long getBackupChunkSize() { - long size = config.get(CONFIG_BACKUP_CHUNK_SIZE, DEFAULT_BACKUP_CHUNK_SIZE); + long size = config.get(PRIAM_PRE + ".backup.chunksizemb", 10); return size * 1024 * 1024L; } @Override public int getJmxPort() { - return config.get(CONFIG_JMX_LISTERN_PORT_NAME, DEFAULT_JMX_PORT); + return config.get(PRIAM_PRE + ".jmx.port", 7199); } @Override public String getJmxUsername() { - return config.get(CONFIG_JMX_USERNAME, ""); + return config.get(PRIAM_PRE + ".jmx.username", ""); } @Override public String getJmxPassword() { - return config.get(CONFIG_JMX_PASSWORD, ""); + return config.get(PRIAM_PRE + ".jmx.password", ""); } - /** - * @return Enables Remote JMX connections n C* - */ + /** @return Enables Remote JMX connections in C* */ @Override public boolean enableRemoteJMX() { - return config.get(CONFIG_JMX_ENABLE_REMOTE, false); + return config.get(PRIAM_PRE + ".jmx.remote.enable", false); } public int getNativeTransportPort() { - return config.get(CONFIG_NATIVE_PROTOCOL_PORT, DEFAULT_NATIVE_PROTOCOL_PORT); - } - - @Override - public int getThriftPort() { - return config.get(CONFIG_THRIFT_LISTEN_PORT_NAME, DEFAULT_THRIFT_PORT); + return config.get(PRIAM_PRE + ".nativeTransport.port", 9042); } @Override public int getStoragePort() { - return config.get(CONFIG_STORAGE_LISTERN_PORT_NAME, DEFAULT_STORAGE_PORT); + return config.get(PRIAM_PRE + ".storage.port", 7000); } @Override public int getSSLStoragePort() { - return config.get(CONFIG_SSL_STORAGE_LISTERN_PORT_NAME, DEFAULT_SSL_STORAGE_PORT); + return config.get(PRIAM_PRE + ".ssl.storage.port", 7001); } @Override public String getSnitch() { - return config.get(CONFIG_ENDPOINT_SNITCH, DEFAULT_ENDPOINT_SNITCH); + return config.get(PRIAM_PRE + ".endpoint_snitch", "org.apache.cassandra.locator.Ec2Snitch"); } @Override public String getAppName() { - return config.get(CONFIG_CLUSTER_NAME, DEFAULT_CLUSTER_NAME); - } - - @Override - public String getRac() { - return RAC; + return config.get(PRIAM_PRE + ".clustername", "cass_cluster"); } @Override public List getRacs() { - return config.getList(CONFIG_AVAILABILITY_ZONES, DEFAULT_AVAILABILITY_ZONES); - } - - @Override - public String getHostname() { - if (this.isVpcRing()) return getInstanceDataRetriever().getPrivateIP(); - else return getInstanceDataRetriever().getPublicHostname(); + return config.getList(PRIAM_PRE + ".zones.available", instanceInfo.getDefaultRacks()); } @Override public String getHeapSize() { - return config.get(CONFIG_MAX_HEAP_SIZE + getInstanceDataRetriever().getInstanceType(), DEFAULT_MAX_HEAP); + return config.get((PRIAM_PRE + ".heap.size.") + instanceInfo.getInstanceType(), "8G"); } @Override public String getHeapNewSize() { - return config.get(CONFIG_NEW_MAX_HEAP_SIZE + getInstanceDataRetriever().getInstanceType(), DEFAULT_MAX_NEWGEN_HEAP); + return config.get( + (PRIAM_PRE + ".heap.newgen.size.") + instanceInfo.getInstanceType(), "2G"); } @Override public String getMaxDirectMemory() { - return config.get(CONFIG_DIRECT_MAX_HEAP_SIZE + getInstanceDataRetriever().getInstanceType(), DEFAULT_MAX_DIRECT_MEM); - } - - @Override - public int getBackupHour() { - return config.get(CONFIG_BACKUP_HOUR, DEFAULT_BACKUP_HOUR); + return config.get( + 
(PRIAM_PRE + ".direct.memory.size.") + instanceInfo.getInstanceType(), "50G"); } @Override public String getBackupCronExpression() { - return config.get(CONFIG_BACKUP_CRON_EXPRESSION, DEFAULT_BACKUP_CRON_EXPRESSION); + return config.get(PRIAM_PRE + ".backup.cron", "0 0 12 1/1 * ? *"); // Backup daily at 12 } @Override - public SchedulerType getBackupSchedulerType() throws UnsupportedTypeException { - String schedulerType = config.get(CONFIG_BACKUP_SCHEDULE_TYPE, SchedulerType.HOUR.getSchedulerType()); - return SchedulerType.lookup(schedulerType); - } - - @Override - public GCType getGCType() throws UnsupportedTypeException{ + public GCType getGCType() throws UnsupportedTypeException { String gcType = config.get(PRIAM_PRE + ".gc.type", GCType.CMS.getGcType()); return GCType.lookup(gcType); } @Override - public Map getJVMExcludeSet() { - return JVMOptionsTuner.parseJVMOptions(config.get(PRIAM_PRE + ".jvm.options.exclude")); + public String getJVMExcludeSet() { + return config.get(PRIAM_PRE + ".jvm.options.exclude"); } @Override - public Map getJVMUpsertSet() { - return JVMOptionsTuner.parseJVMOptions(config.get(PRIAM_PRE + ".jvm.options.upsert")); - } - - @Override - public SchedulerType getFlushSchedulerType() throws UnsupportedTypeException { - String schedulerType = config.get(PRIAM_PRE + ".flush.schedule.type", SchedulerType.HOUR.getSchedulerType()); - return SchedulerType.lookup(schedulerType); + public String getJVMUpsertSet() { + return config.get(PRIAM_PRE + ".jvm.options.upsert"); } @Override @@ -627,330 +249,259 @@ public String getCompactionExcludeCFList() { } @Override - public String getSnapshotKeyspaceFilters() { - return config.get(CONFIG_SNAPSHOT_KEYSPACE_FILTER); + public String getSnapshotIncludeCFList() { + return config.get(PRIAM_PRE + ".snapshot.cf.include"); } @Override - public String getSnapshotCFFilter() throws IllegalArgumentException { - return config.get(CONFIG_SNAPSHOT_CF_FILTER); + public String getSnapshotExcludeCFList() { + return config.get(PRIAM_PRE + ".snapshot.cf.exclude"); } @Override - public String getIncrementalKeyspaceFilters() { - return config.get(CONFIG_INCREMENTAL_KEYSPACE_FILTER); + public String getIncrementalIncludeCFList() { + return config.get(PRIAM_PRE + ".incremental.cf.include"); } @Override - public String getIncrementalCFFilter() { - return config.get(CONFIG_INCREMENTAL_CF_FILTER); + public String getIncrementalExcludeCFList() { + return config.get(PRIAM_PRE + ".incremental.cf.exclude"); } @Override - public String getRestoreKeyspaceFilter() { - return config.get(CONFIG_RESTORE_KEYSPACE_FILTER); + public String getRestoreIncludeCFList() { + return config.get(PRIAM_PRE + ".restore.cf.include"); } @Override - public String getRestoreCFFilter() { - return config.get(CONFIG_RESTORE_CF_FILTER); + public String getRestoreExcludeCFList() { + return config.get(PRIAM_PRE + ".restore.cf.exclude"); } @Override public String getRestoreSnapshot() { - return config.get(CONFIG_AUTO_RESTORE_SNAPSHOTNAME, ""); + return config.get(PRIAM_PRE + ".restore.snapshot", ""); } @Override - public boolean isRestoreEncrypted(){ + public boolean isRestoreEncrypted() { return config.get(PRIAM_PRE + ".encrypted.restore.enabled", false); } @Override public String getSDBInstanceIdentityRegion() { - return config.get(SDB_INSTANCE_INDENTITY_REGION_NAME, "us-east-1"); - } - - @Override - public String getDC() { - return config.get(CONFIG_REGION_NAME, ""); - } - - @Override - public void setDC(String region) { - config.set(CONFIG_REGION_NAME, region); + return 
config.get(PRIAM_PRE + ".sdb.instanceIdentity.region", "us-east-1"); } @Override public boolean isMultiDC() { - return config.get(CONFIG_MR_ENABLE, false); + return config.get(PRIAM_PRE + ".multiregion.enable", false); } @Override - public int getMaxBackupUploadThreads() { - - return config.get(CONFIG_BACKUP_THREADS, DEFAULT_BACKUP_THREADS); + public int getBackupThreads() { + return config.get(PRIAM_PRE + ".backup.threads", 2); } @Override - public int getMaxBackupDownloadThreads() { - return config.get(CONFIG_RESTORE_THREADS, DEFAULT_RESTORE_THREADS); + public int getRestoreThreads() { + return config.get(PRIAM_PRE + ".restore.threads", 8); } @Override public boolean isRestoreClosestToken() { - return config.get(CONFIG_RESTORE_CLOSEST_TOKEN, false); + return config.get(PRIAM_PRE + ".restore.closesttoken", false); } - @Override - public String getASGName() { - return config.get(CONFIG_ASG_NAME, ""); - } - - /** - * Amazon specific setting to query Additional/ Sibling ASG Memberships in csv format to consider while calculating RAC membership - */ @Override public String getSiblingASGNames() { - return config.get(CONFIG_SIBLING_ASG_NAMES, ","); + return config.get(PRIAM_PRE + ".az.sibling.asgnames", ","); } @Override public String getACLGroupName() { - return config.get(CONFIG_ACL_GROUP_NAME, this.getAppName()); + return config.get(PRIAM_PRE + ".acl.groupname", this.getAppName()); } @Override - public boolean isIncrBackup() { - return config.get(CONFIG_INCR_BK_ENABLE, true); - } - - @Override - public String getHostIP() { - if (this.isVpcRing()) return getInstanceDataRetriever().getPrivateIP(); - else return getInstanceDataRetriever().getPublicIP(); + public boolean isIncrementalBackupEnabled() { + return config.get(PRIAM_PRE + ".backup.incremental.enable", true); } @Override public int getUploadThrottle() { - return config.get(CONFIG_THROTTLE_UPLOAD_PER_SECOND, Integer.MAX_VALUE); + return config.get(PRIAM_PRE + ".upload.throttle", -1); } @Override - public boolean isLocalBootstrapEnabled() { - return config.get(CONFIG_LOAD_LOCAL_PROPERTIES, false); + public int getRemoteFileSystemObjectExistsThrottle() { + return config.get(PRIAM_PRE + ".remoteFileSystemObjectExistThrottle", -1); } @Override - public int getInMemoryCompactionLimit() { - return config.get(CONFIG_IN_MEMORY_COMPACTION_LIMIT, 128); + public boolean isLocalBootstrapEnabled() { + return config.get(PRIAM_PRE + ".localbootstrap.enable", false); } @Override public int getCompactionThroughput() { - return config.get(CONFIG_COMPACTION_THROUHPUT, 8); + return config.get(PRIAM_PRE + ".compaction.throughput", 8); } @Override public int getMaxHintWindowInMS() { - return config.get(CONFIG_MAX_HINT_WINDOW_IN_MS, 10800000); + return config.get(PRIAM_PRE + ".hint.window", 10800000); } public int getHintedHandoffThrottleKb() { - return config.get(CONFIG_HINTS_THROTTLE_KB, DEFAULT_HINTS_THROTTLE_KB); - } - - public int getMaxHintThreads() { - return config.get(CONFIG_MAX_HINT_THREADS, DEFAULT_HINTS_MAX_THREADS); + return config.get(PRIAM_PRE + ".hints.throttleKb", 1024); } @Override public String getBootClusterName() { - return config.get(CONFIG_BOOTCLUSTER_NAME, ""); + return config.get(PRIAM_PRE + ".bootcluster", ""); } @Override public String getSeedProviderName() { - return config.get(CONFIG_SEED_PROVIDER_NAME, DEFAULT_SEED_PROVIDER); + return config.get( + PRIAM_PRE + ".seed.provider", + "com.netflix.priam.cassandra.extensions.NFSeedProvider"); } - /** - * memtable_cleanup_threshold defaults to 1 / (memtable_flush_writers + 1) = 0.11 - 
*/ public double getMemtableCleanupThreshold() { - return config.get(CONFIG_MEMTABLE_CLEANUP_THRESHOLD, 0.11); + return config.get(PRIAM_PRE + ".memtable.cleanup.threshold", 0.11); } @Override public int getStreamingThroughputMB() { - return config.get(CONFIG_STREAMING_THROUGHPUT_MB, 400); + return config.get(PRIAM_PRE + ".streaming.throughput.mb", 400); } public String getPartitioner() { - return config.get(CONFIG_PARTITIONER, DEFAULT_PARTITIONER); + return config.get(PRIAM_PRE + ".partitioner", "org.apache.cassandra.dht.RandomPartitioner"); } public String getKeyCacheSizeInMB() { - return config.get(CONFIG_KEYCACHE_SIZE); + return config.get(PRIAM_PRE + ".keyCache.size"); } public String getKeyCacheKeysToSave() { - return config.get(CONFIG_KEYCACHE_COUNT); + return config.get(PRIAM_PRE + ".keyCache.count"); } public String getRowCacheSizeInMB() { - return config.get(CONFIG_ROWCACHE_SIZE); + return config.get(PRIAM_PRE + ".rowCache.size"); } public String getRowCacheKeysToSave() { - return config.get(CONFIG_ROWCACHE_COUNT); + return config.get(PRIAM_PRE + ".rowCache.count"); } @Override public String getCassProcessName() { - return config.get(CONFIG_CASS_PROCESS_NAME, DEFAULT_CASS_PROCESS_NAME); - } - - public int getNumTokens() { - return config.get(CONFIG_VNODE_NUM_TOKENS, DEFAULT_VNODE_NUM_TOKENS); + return config.get(PRIAM_PRE + ".cass.process", "CassandraDaemon"); } public String getYamlLocation() { - return config.get(CONFIG_YAML_LOCATION, getCassHome() + "/conf/cassandra.yaml"); + return config.get(PRIAM_PRE + ".yamlLocation", getCassHome() + "/conf/cassandra.yaml"); } @Override - public String getJVMOptionsFileLocation() - { - return config.get(PRIAM_PRE + ".jvm.options.location", getCassHome() + "/conf/jvm.options"); + public String getJVMOptionsFileLocation() { + return config.get( + PRIAM_PRE + ".jvm.options.location", getCassHome() + "/conf/jvm-server.options"); } public String getAuthenticator() { - return config.get(CONFIG_AUTHENTICATOR, DEFAULT_AUTHENTICATOR); + return config.get( + PRIAM_PRE + ".authenticator", "org.apache.cassandra.auth.AllowAllAuthenticator"); } public String getAuthorizer() { - return config.get(CONFIG_AUTHORIZER, DEFAULT_AUTHORIZER); + return config.get( + PRIAM_PRE + ".authorizer", "org.apache.cassandra.auth.AllowAllAuthorizer"); + } + + public String getRoleManager() { + return config.get( + PRIAM_PRE + ".roleManager", "org.apache.cassandra.auth.CassandraRoleManager"); } @Override public boolean doesCassandraStartManually() { - return config.get(CONFIG_CASS_MANUAL_START_ENABLE, false); + return config.get(PRIAM_PRE + ".cass.manual.start.enable", false); } public String getInternodeCompression() { - return config.get(CONFIG_INTERNODE_COMPRESSION, DEFAULT_INTERNODE_COMPRESSION); - } - - @Override - public void setRestorePrefix(String prefix) { - config.set(CONFIG_RESTORE_PREFIX, prefix); - + return config.get(PRIAM_PRE + ".internodeCompression", "all"); } @Override public boolean isBackingUpCommitLogs() { - return config.get(CONFIG_COMMITLOG_BKUP_ENABLED, false); + return config.get(PRIAM_PRE + ".clbackup.enabled", false); } @Override public String getCommitLogBackupPropsFile() { - return config.get(CONFIG_COMMITLOG_PROPS_FILE, getCassHome() + DEFAULT_COMMITLOG_PROPS_FILE); + return config.get( + PRIAM_PRE + ".clbackup.propsfile", + getCassHome() + "/conf/commitlog_archiving.properties"); } @Override public String getCommitLogBackupArchiveCmd() { - return config.get(CONFIG_COMMITLOG_ARCHIVE_CMD, "/bin/ln %path /mnt/data/backup/%name"); + return 
config.get( + PRIAM_PRE + ".clbackup.archiveCmd", "/bin/ln %path /mnt/data/backup/%name"); } @Override public String getCommitLogBackupRestoreCmd() { - return config.get(CONFIG_COMMITLOG_RESTORE_CMD, "/bin/mv %from %to"); + return config.get(PRIAM_PRE + ".clbackup.restoreCmd", "/bin/mv %from %to"); } @Override public String getCommitLogBackupRestoreFromDirs() { - return config.get(CONFIG_COMMITLOG_RESTORE_DIRS, "/mnt/data/backup/commitlog/"); + return config.get(PRIAM_PRE + ".clbackup.restoreDirs", "/mnt/data/backup/commitlog/"); } @Override public String getCommitLogBackupRestorePointInTime() { - return config.get(CONFIG_COMMITLOG_RESTORE_POINT_IN_TIME, ""); + return config.get(PRIAM_PRE + ".clbackup.restoreTime", ""); } @Override public int maxCommitLogsRestore() { - return config.get(CONFIG_COMMITLOG_RESTORE_MAX, 10); - } - - @Override - public boolean isVpcRing() { - return config.get(CONFIG_VPC_RING, false); - } - - @Override - public void setRestoreKeySpaces(List keyspaces) { - if (keyspaces == null) - return; - - StringBuilder sb = new StringBuilder(); - for (int i = 0; i < keyspaces.size(); i++) { - if (i > 0) - sb.append(","); - - sb.append(keyspaces.get(i)); - } - - config.set(CONFIG_RESTORE_KEYSPACES, sb.toString()); + return config.get(PRIAM_PRE + ".clrestore.max", 10); } public boolean isClientSslEnabled() { - return config.get(CONFIG_CLIENT_SSL_ENABLED, false); + return config.get(PRIAM_PRE + ".client.sslEnabled", false); } public String getInternodeEncryption() { - return config.get(CONFIG_INTERNODE_ENCRYPTION, "none"); + return config.get(PRIAM_PRE + ".internodeEncryption", "none"); } public boolean isDynamicSnitchEnabled() { - return config.get(CONFIG_DSNITCH_ENABLED, true); - } - - public boolean isThriftEnabled() { - return config.get(CONFIG_THRIFT_ENABLED, true); + return config.get(PRIAM_PRE + ".dsnitchEnabled", true); } public boolean isNativeTransportEnabled() { - return config.get(CONFIG_NATIVE_PROTOCOL_ENABLED, false); + return config.get(PRIAM_PRE + ".nativeTransport.enabled", false); } public int getConcurrentReadsCnt() { - return config.get(CONFIG_CONCURRENT_READS, 32); + return config.get(PRIAM_PRE + ".concurrentReads", 32); } public int getConcurrentWritesCnt() { - return config.get(CONFIG_CONCURRENT_WRITES, 32); + return config.get(PRIAM_PRE + ".concurrentWrites", 32); } public int getConcurrentCompactorsCnt() { int cpus = Runtime.getRuntime().availableProcessors(); - return config.get(CONFIG_CONCURRENT_COMPACTORS, cpus); - } - - public String getRpcServerType() { - return config.get(CONFIG_RPC_SERVER_TYPE, DEFAULT_RPC_SERVER_TYPE); - } - - public int getRpcMinThreads() { - return config.get(CONFIG_RPC_MIN_THREADS, DEFAULT_RPC_MIN_THREADS); - } - - public int getRpcMaxThreads() { - return config.get(CONFIG_RPC_MAX_THREADS, DEFAULT_RPC_MAX_THREADS); - } - - public int getIndexInterval() { - return config.get(CONFIG_INDEX_INTERVAL, DEFAULT_INDEX_INTERVAL); + return config.get(PRIAM_PRE + ".concurrentCompactors", cpus); } @Override @@ -959,33 +510,37 @@ public int getCompactionLargePartitionWarnThresholdInMB() { } public String getExtraConfigParams() { - return config.get(CONFIG_EXTRA_PARAMS); + return config.get(PRIAM_PRE + ".extra.params"); } + @Override public Map getExtraEnvParams() { - String envParams = config.get(CONFIG_EXTRA_ENV_PARAMS); + String envParams = config.get(PRIAM_PRE + ".extra.env.params"); if (envParams == null) { logger.info("getExtraEnvParams: No extra env params"); return null; } - Map extraEnvParamsMap = new HashMap(); + Map 
extraEnvParamsMap = new HashMap<>(); String[] pairs = envParams.split(","); logger.info("getExtraEnvParams: Extra cass params. From config :{}", envParams); - for (int i = 0; i < pairs.length; i++) { - String[] pair = pairs[i].split("="); + for (String pair1 : pairs) { + String[] pair = pair1.split("="); if (pair.length > 1) { String priamKey = pair[0]; String cassKey = pair[1]; String cassVal = config.get(priamKey); - logger.info("getExtraEnvParams: Start-up/ env params: Priamkey[{}], CassStartupKey[{}], Val[{}]", priamKey, cassKey, cassVal); + logger.info( + "getExtraEnvParams: Start-up/ env params: Priamkey[{}], CassStartupKey[{}], Val[{}]", + priamKey, + cassKey, + cassVal); if (!StringUtils.isBlank(cassKey) && !StringUtils.isBlank(cassVal)) { extraEnvParamsMap.put(cassKey, cassVal); } } } return extraEnvParamsMap; - } public String getCassYamlVal(String priamKey) { @@ -993,120 +548,113 @@ public String getCassYamlVal(String priamKey) { } public boolean getAutoBoostrap() { - return config.get(CONFIG_AUTO_BOOTSTRAP, true); - } - - //values are cassandra, solr, hadoop, spark or hadoop-spark - public String getDseClusterType() { - return config.get(CONFIG_DSE_CLUSTER_TYPE + "." + ASG_NAME, "cassandra"); + return config.get(PRIAM_PRE + ".auto.bootstrap", true); } @Override public boolean isCreateNewTokenEnable() { - return config.get(CONFIG_CREATE_NEW_TOKEN_ENABLE, true); + return config.get(PRIAM_PRE + ".create.new.token.enable", true); } - @Override public String getPrivateKeyLocation() { - return config.get(CONFIG_PRIKEY_LOC); + return config.get(PRIAM_PRE + ".private.key.location"); } @Override public String getRestoreSourceType() { - return config.get(CONFIG_RESTORE_SOURCE_TYPE); + return config.get(PRIAM_PRE + ".restore.source.type"); } @Override public boolean isEncryptBackupEnabled() { - return config.get(CONFIG_ENCRYPTED_BACKUP_ENABLED, false); + return config.get(PRIAM_PRE + ".encrypted.backup.enabled", false); } @Override public String getAWSRoleAssumptionArn() { - return config.get(CONFIG_S3_ROLE_ASSUMPTION_ARN); + return config.get(PRIAM_PRE + ".roleassumption.arn"); } @Override public String getClassicEC2RoleAssumptionArn() { - return config.get(CONFIG_EC2_ROLE_ASSUMPTION_ARN); + return config.get(PRIAM_PRE + ".ec2.roleassumption.arn"); } @Override public String getVpcEC2RoleAssumptionArn() { - return config.get(CONFIG_VPC_ROLE_ASSUMPTION_ARN); + return config.get(PRIAM_PRE + ".vpc.roleassumption.arn"); } @Override public boolean isDualAccount() { - return config.get(CONFIG_DUAL_ACCOUNT, DEFAULT_DUAL_ACCOUNT); + return config.get(PRIAM_PRE + ".roleassumption.dualaccount", false); } @Override public String getGcsServiceAccountId() { - return config.get(CONFIG_GCS_SERVICE_ACCT_ID); + return config.get(PRIAM_PRE + ".gcs.service.acct.id"); } @Override public String getGcsServiceAccountPrivateKeyLoc() { - return config.get(CONFIG_GCS_SERVICE_ACCT_PRIVATE_KEY_LOC, "/apps/tomcat/conf/gcsentryptedkey.p12"); + return config.get( + PRIAM_PRE + ".gcs.service.acct.private.key", + "/apps/tomcat/conf/gcsentryptedkey.p12"); } @Override public String getPgpPasswordPhrase() { - return config.get(CONFIG_PGP_PASSWORD_PHRASE); + return config.get(PRIAM_PRE + ".pgp.password.phrase"); } @Override public String getPgpPublicKeyLoc() { - return config.get(CONFIG_PGP_PUB_KEY_LOC); + return config.get(PRIAM_PRE + ".pgp.pubkey.file.location"); + } + + @Override + public boolean enableAsyncIncremental() { + return config.get(PRIAM_PRE + ".async.incremental", false); } @Override - /* - * @return the vpc 
id of the running instance. - */ - public String getVpcId() { - return NETWORK_VPC; + public boolean enableAsyncSnapshot() { + return config.get(PRIAM_PRE + ".async.snapshot", false); } @Override - public Boolean isIncrBackupParallelEnabled() { - return config.get(PRIAM_PRE + ".incremental.bkup.parallel", false); + public int getBackupQueueSize() { + return config.get(PRIAM_PRE + ".backup.queue.size", 100000); } @Override - public int getIncrementalBkupMaxConsumers() { - return config.get(PRIAM_PRE + ".incremental.bkup.max.consumers", 4); + public int getDownloadQueueSize() { + return config.get(PRIAM_PRE + ".download.queue.size", 100000); } @Override - public int getUncrementalBkupQueueSize() { - return config.get(PRIAM_PRE + ".incremental.bkup.queue.size", 100000); + public long getUploadTimeout() { + return config.get(PRIAM_PRE + ".upload.timeout", (2 * 60 * 60 * 1000L)); + } + + public long getDownloadTimeout() { + return config.get(PRIAM_PRE + ".download.timeout", (10 * 60 * 60 * 1000L)); } - /** - * @return tombstone_warn_threshold in yaml - */ @Override public int getTombstoneWarnThreshold() { - return config.get(CONFIG_TOMBSTONE_WARNING_THRESHOLD, DEFAULT_TOMBSTONE_WARNING_THRESHOLD); + return config.get(PRIAM_PRE + ".tombstone.warning.threshold", 1000); } - /** - * @return tombstone_failure_threshold in yaml - */ @Override public int getTombstoneFailureThreshold() { - return config.get(CONFIG_TOMBSTONE_FAILURE_THRESHOLD, DEFAULT_TOMBSTONE_FAILURE_THRESHOLD); + return config.get(PRIAM_PRE + ".tombstone.failure.threshold", 100000); } - /** - * @return streaming_socket_timeout_in_ms in yaml - */ @Override - public int getStreamingSocketTimeoutInMS() { - return config.get(CONFIG_STREAMING_SOCKET_TIMEOUT_IN_MS, DEFAULT_STREAMING_SOCKET_TIMEOUT_IN_MS); + public int getStreamingKeepAlivePeriod() { + return config.get(PRIAM_PRE + ".streaming.socket.keepalive.s", 300); } @Override @@ -1115,18 +663,20 @@ public String getFlushKeyspaces() { } @Override - public String getFlushInterval() { - return config.get(PRIAM_PRE + ".flush.interval"); + public String getBackupStatusFileLoc() { + return config.get( + PRIAM_PRE + ".backup.status.location", + getDataFileLocation() + File.separator + "backup.status"); } @Override - public String getBackupStatusFileLoc() { - return config.get(CONFIG_BACKUP_STATUS_FILE_LOCATION, getDataFileLocation() + File.separator + "backup.status"); + public boolean useSudo() { + return config.get(PRIAM_PRE + ".cass.usesudo", true); } @Override - public boolean useSudo() { - return config.get(CONFIG_CASS_USE_SUDO, true); + public boolean enableBackupNotification() { + return config.get(PRIAM_PRE + ".enableBackupNotification", true); } @Override @@ -1136,35 +686,92 @@ public String getBackupNotificationTopicArn() { @Override public boolean isPostRestoreHookEnabled() { - return config.get(CONFIG_POST_RESTORE_HOOK_ENABLED, false); + return config.get(PRIAM_PRE + ".postrestorehook.enabled", false); } @Override public String getPostRestoreHook() { - return config.get(CONFIG_POST_RESTORE_HOOK); + return config.get(PRIAM_PRE + ".postrestorehook"); } @Override public String getPostRestoreHookHeartbeatFileName() { - return config.get(CONFIG_POST_RESTORE_HOOK_HEARTBEAT_FILENAME, getDataFileLocation() + File.separator + "postrestorehook_heartbeat"); + return config.get( + PRIAM_PRE + ".postrestorehook.heartbeat.filename", + getDataFileLocation() + File.separator + "postrestorehook_heartbeat"); } @Override public String getPostRestoreHookDoneFileName() { - return 
config.get(CONFIG_POST_RESTORE_HOOK_DONE_FILENAME, getDataFileLocation() + File.separator + "postrestorehook_done"); + return config.get( + PRIAM_PRE + ".postrestorehook.done.filename", + getDataFileLocation() + File.separator + "postrestorehook_done"); } @Override public int getPostRestoreHookTimeOutInDays() { - return config.get(CONFIG_POST_RESTORE_HOOK_TIMEOUT_IN_DAYS, 2); + return config.get(PRIAM_PRE + ".postrestorehook.timeout.in.days", 2); } @Override public int getPostRestoreHookHeartBeatTimeoutInMs() { - return config.get(CONFIG_POST_RESTORE_HOOK_HEARTBEAT_TIMEOUT_MS, 120000); + return config.get(PRIAM_PRE + ".postrestorehook.heartbeat.timeout", 120000); } + @Override public int getPostRestoreHookHeartbeatCheckFrequencyInMs() { - return config.get(CONFIG_POST_RESTORE_HOOK_HEARTBEAT_CHECK_FREQUENCY_MS, 120000); + return config.get(PRIAM_PRE + ".postrestorehook.heartbeat.check.frequency", 120000); + } + + @Override + public String getProperty(String key, String defaultValue) { + return config.get(key, defaultValue); + } + + @Override + public String getMergedConfigurationCronExpression() { + // Every minute on the top of the minute. + return config.get(PRIAM_PRE + ".configMerge.cron", "0 * * * * ? *"); + } + + @Override + public int getGracePeriodDaysForCompaction() { + return config.get(PRIAM_PRE + ".gracePeriodDaysForCompaction", 5); + } + + @Override + public int getForgottenFileGracePeriodDaysForRead() { + return config.get(PRIAM_PRE + ".forgottenFileGracePeriodDaysForRead", 3); + } + + @Override + public boolean isForgottenFileMoveEnabled() { + return config.get(PRIAM_PRE + ".forgottenFileMoveEnabled", false); + } + + @Override + public BackupsToCompress getBackupsToCompress() { + return BackupsToCompress.valueOf( + config.get("priam.backupsToCompress", BackupsToCompress.ALL.name())); + } + + @Override + public String getDiskAccessMode() { + return config.get(PRIAM_PRE + ".diskAccessMode", "auto"); + } + + @Override + public boolean permitDirectTokenAssignmentWithGossipMismatch() { + return config.get(PRIAM_PRE + ".permitDirectTokenAssignmentWithGossipMismatch", false); + } + + @Override + public int getTargetMinutesToCompleteSnaphotUpload() { + return config.get(PRIAM_PRE + ".snapshotUploadDuration", 0); + } + + @Override + public double getRateLimitChangeThreshold() { + return config.get(PRIAM_PRE + ".rateLimitChangeThreshold", 0.1); } } diff --git a/priam/src/main/java/com/netflix/priam/config/PriamConfigurationPersister.java b/priam/src/main/java/com/netflix/priam/config/PriamConfigurationPersister.java new file mode 100644 index 000000000..a22d9a54b --- /dev/null +++ b/priam/src/main/java/com/netflix/priam/config/PriamConfigurationPersister.java @@ -0,0 +1,112 @@ +/* + * Copyright 2018 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ +package com.netflix.priam.config; + +import com.fasterxml.jackson.core.util.MinimalPrettyPrinter; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.ObjectWriter; +import com.netflix.priam.scheduler.CronTimer; +import com.netflix.priam.scheduler.Task; +import com.netflix.priam.scheduler.TaskTimer; +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.attribute.PosixFilePermissions; +import java.util.Map; +import javax.inject.Inject; +import javax.inject.Singleton; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** Task that persists structured and merged priam configuration to disk. */ +@Singleton +public class PriamConfigurationPersister extends Task { + public static final String NAME = "PriamConfigurationPersister"; + + private static final Logger logger = LoggerFactory.getLogger(PriamConfigurationPersister.class); + + private final Path mergedConfigDirectory; + private final Path structuredPath; + + @Inject + public PriamConfigurationPersister(IConfiguration config) { + super(config); + + mergedConfigDirectory = Paths.get(config.getMergedConfigurationDirectory()); + structuredPath = Paths.get(config.getMergedConfigurationDirectory(), "structured.json"); + } + + private synchronized void ensurePaths() throws IOException { + File directory = mergedConfigDirectory.toFile(); + + if (directory.mkdirs()) { + Files.setPosixFilePermissions( + mergedConfigDirectory, PosixFilePermissions.fromString("rwx------")); + logger.info("Set up PriamConfigurationPersister directory successfully"); + } + } + + @Override + public void execute() throws Exception { + ensurePaths(); + Path tempPath = null; + try { + File output = + File.createTempFile( + structuredPath.getFileName().toString(), + ".tmp", + mergedConfigDirectory.toFile()); + tempPath = output.toPath(); + + // The configuration might contain sensitive information, so ... don't let non Priam + // users read it + // Theoretically createTempFile creates the file with the right permissions, but I want + // to be explicit + Files.setPosixFilePermissions(tempPath, PosixFilePermissions.fromString("rw-------")); + + Map structuredConfiguration = config.getStructuredConfiguration("all"); + + ObjectMapper mapper = new ObjectMapper(); + ObjectWriter structuredPathTmpWriter = mapper.writer(new MinimalPrettyPrinter()); + structuredPathTmpWriter.writeValue(output, structuredConfiguration); + + // Atomically swap out the new config for the old config. + if (!output.renameTo(structuredPath.toFile())) + logger.error("Failed to persist structured Priam configuration"); + } finally { + if (tempPath != null) Files.deleteIfExists(tempPath); + } + } + + @Override + public String getName() { + return NAME; + } + + /** + * Timer to be used for configuration writing. + * + * @param config {@link IConfiguration} to get configuration details from priam. 
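PriamConfigurationPersister.execute() above uses the classic write-to-temp-then-rename pattern so readers never observe a half-written structured.json. Below is a standalone sketch of that pattern (demo paths, not Priam's real wiring) using Files.move, which, unlike the File.renameTo call above, lets you request an atomic move explicitly and throws an exception instead of returning a boolean on failure:

```java
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;
import java.nio.file.attribute.PosixFilePermissions;

public class AtomicConfigWriteSketch {
    public static void main(String[] args) throws IOException {
        Path dir = Files.createDirectories(Paths.get("/tmp/priam_configuration_demo"));
        Path target = dir.resolve("structured.json");
        // Create the temp file in the target directory so the final move stays
        // on one file system and can be performed as a single rename.
        Path tmp = Files.createTempFile(dir, "structured.json", ".tmp");
        try {
            // The payload may hold secrets, so restrict it to the owning user
            // (POSIX file systems only).
            Files.setPosixFilePermissions(tmp, PosixFilePermissions.fromString("rw-------"));
            Files.write(tmp, "{\"priam\":{}}".getBytes(StandardCharsets.UTF_8));
            // Swap the new file into place; on POSIX systems an existing target
            // is replaced atomically, so readers see old or new content, never a mix.
            Files.move(tmp, target,
                    StandardCopyOption.ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING);
        } finally {
            Files.deleteIfExists(tmp); // no-op if the move succeeded
        }
    }
}
```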
+ * @return the timer to be used for configuration persisting from {@link + IConfiguration#getMergedConfigurationCronExpression()} + */ + public static TaskTimer getTimer(IConfiguration config) { + return CronTimer.getCronTimer(NAME, config.getMergedConfigurationCronExpression()); + } +} diff --git a/priam/src/main/java/com/netflix/priam/configSource/AbstractConfigSource.java b/priam/src/main/java/com/netflix/priam/configSource/AbstractConfigSource.java index bc770afd7..ee4485559 100644 --- a/priam/src/main/java/com/netflix/priam/configSource/AbstractConfigSource.java +++ b/priam/src/main/java/com/netflix/priam/configSource/AbstractConfigSource.java @@ -16,24 +16,21 @@ */ package com.netflix.priam.configSource; +import static com.google.common.base.Preconditions.checkNotNull; + import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; -import org.apache.commons.lang3.StringUtils; - import java.util.List; +import org.apache.commons.lang3.StringUtils; -import static com.google.common.base.Preconditions.checkNotNull; - -/** - * Base implementations for most methods on {@link IConfigSource}. - */ +/** Base implementations for most methods on {@link IConfigSource}. */ public abstract class AbstractConfigSource implements IConfigSource { private String asgName; private String region; @Override - public void intialize(final String asgName, final String region) { + public void initialize(final String asgName, final String region) { this.asgName = checkNotNull(asgName, "ASG name is not defined"); this.region = checkNotNull(region, "Region is not defined"); } @@ -174,5 +171,4 @@ private List getTrimmedStringList(String[] strings) { } return list; } - } diff --git a/priam/src/main/java/com/netflix/priam/configSource/CompositeConfigSource.java b/priam/src/main/java/com/netflix/priam/configSource/CompositeConfigSource.java index 4229f9395..97703c24e 100644 --- a/priam/src/main/java/com/netflix/priam/configSource/CompositeConfigSource.java +++ b/priam/src/main/java/com/netflix/priam/configSource/CompositeConfigSource.java @@ -20,24 +20,27 @@ import com.google.common.collect.ImmutableCollection; import com.google.common.collect.ImmutableList; import com.google.common.collect.Iterables; - import java.util.Collection; /** - * A {@link IConfigSource} that delegates method calls to the underline sources. The order in which values are provided - depend on the {@link IConfigSource}s provided. If user asks for key 'foo', and this composite has three sources, it - will first check if the key is found in the first source, if not it will check the second and if not, the third, else - return null or false if {@link #contains(String)} was called. + * A {@link IConfigSource} that delegates method calls to the underlying sources. The order in which + * values are provided depends on the {@link IConfigSource}s provided. If a user asks for key 'foo', + * and this composite has three sources, it will first check if the key is found in the first + * source, if not it will check the second and if not, the third, else return null or false if + * {@link #contains(String)} was called. * - * Implementation note: get methods with a default are implemented in {@link AbstractConfigSource}, if the underlying - source overrides one of these methods, then that implementation will be ignored. + *
<p>
Implementation note: get methods with a default are implemented in {@link + * AbstractConfigSource}, if the underlying source overrides one of these methods, then that + * implementation will be ignored. */ public class CompositeConfigSource extends AbstractConfigSource { private final ImmutableCollection sources; public CompositeConfigSource(final ImmutableCollection sources) { - Preconditions.checkArgument(!sources.isEmpty(), "Can not create a composite config source without config sources!"); + Preconditions.checkArgument( + !sources.isEmpty(), + "Can not create a composite config source without config sources!"); this.sources = sources; } @@ -54,10 +57,10 @@ public CompositeConfigSource(final IConfigSource... sources) { } @Override - public void intialize(final String asgName, final String region) { + public void initialize(final String asgName, final String region) { for (final IConfigSource source : sources) { - //TODO should this catch any potential exceptions? - source.intialize(asgName, region); + // TODO should this catch any potential exceptions? + source.initialize(asgName, region); } } @@ -96,8 +99,10 @@ public String get(final String key) { public void set(final String key, final String value) { Preconditions.checkNotNull(value, "Value can not be null for configurations."); final IConfigSource firstSource = Iterables.getFirst(sources, null); - // firstSource shouldn't be null because the collection is immutable, and the collection is non empty. - Preconditions.checkState(firstSource != null, "There was no IConfigSource found at the first location?"); + // firstSource shouldn't be null because the collection is immutable, and the collection is + // non empty. + Preconditions.checkState( + firstSource != null, "There was no IConfigSource found at the first location?"); firstSource.set(key, value); } } diff --git a/priam/src/main/java/com/netflix/priam/configSource/IConfigSource.java b/priam/src/main/java/com/netflix/priam/configSource/IConfigSource.java index eb6ee1315..08b336e0b 100644 --- a/priam/src/main/java/com/netflix/priam/configSource/IConfigSource.java +++ b/priam/src/main/java/com/netflix/priam/configSource/IConfigSource.java @@ -17,22 +17,20 @@ package com.netflix.priam.configSource; import com.google.inject.ImplementedBy; - import java.util.List; -/** - * Defines the configurations for an application. - */ +/** Defines the configurations for an application. */ @ImplementedBy(PriamConfigSource.class) public interface IConfigSource { /** - * Must be called before any other method. This method will allow implementations to do any setup that they require - * before being called. + * Must be called before any other method. This method will allow implementations to do any + * setup that they require before being called. + * * @param asgName: Name of the asg * @param region: Name of the region */ - void intialize(String asgName, String region); + void initialize(String asgName, String region); /** * A non-negative integer indicating a count of elements. @@ -67,7 +65,7 @@ public interface IConfigSource { /** * Get a String associated with the given configuration key. * - * @param key to look up value. + * @param key to look up value. * @param defaultValue if value is not present. * @return value from config or defaultValue if not present. */ @@ -76,7 +74,7 @@ public interface IConfigSource { /** * Get a boolean associated with the given configuration key. * - * @param key to look up value. + * @param key to look up value. * @param defaultValue if value is not present. 
* @return value from config or defaultValue if not present. */ @@ -85,7 +83,7 @@ public interface IConfigSource { /** * Get a Class associated with the given configuration key. * - * @param key to look up value. + * @param key to look up value. * @param defaultValue if value is not present. * @return value from config or defaultValue if not present. */ @@ -94,9 +92,9 @@ public interface IConfigSource { /** * Get a Enum associated with the given configuration key. * - * @param key to look up value. + * @param key to look up value. * @param defaultValue if value is not present. - * @param enum type. + * @param enum type. * @return value from config or defaultValue if not present. */ > T get(String key, T defaultValue); @@ -104,7 +102,7 @@ public interface IConfigSource { /** * Get a int associated with the given configuration key. * - * @param key to look up value. + * @param key to look up value. * @param defaultValue if value is not present. * @return value from config or defaultValue if not present. */ @@ -113,7 +111,7 @@ public interface IConfigSource { /** * Get a long associated with the given configuration key. * - * @param key to look up value. + * @param key to look up value. * @param defaultValue if value is not present. * @return value from config or defaultValue if not present. */ @@ -122,7 +120,7 @@ public interface IConfigSource { /** * Get a float associated with the given configuration key. * - * @param key to look up value. + * @param key to look up value. * @param defaultValue if value is not present. * @return value from config or defaultValue if not present. */ @@ -131,7 +129,7 @@ public interface IConfigSource { /** * Get a double associated with the given configuration key. * - * @param key to look up value. + * @param key to look up value. * @param defaultValue if value is not present. * @return value from config or defaultValue if not present. */ @@ -140,7 +138,7 @@ public interface IConfigSource { /** * Get a list of strings associated with the given configuration key. * - * @param key to look up value. + * @param key to look up value. * @return value from config or an immutable list if not present. */ List getList(String key); @@ -148,7 +146,7 @@ public interface IConfigSource { /** * Get a list of strings associated with the given configuration key. * - * @param key to look up value. + * @param key to look up value. * @param defaultValue if value is not present. * @return value from config or defaultValue if not present. */ @@ -157,7 +155,7 @@ public interface IConfigSource { /** * Set the value for the given key. * - * @param key to set value for. + * @param key to set value for. * @param value to set. 
*/ void set(String key, String value); diff --git a/priam/src/main/java/com/netflix/priam/configSource/MemoryConfigSource.java b/priam/src/main/java/com/netflix/priam/configSource/MemoryConfigSource.java index 1b6b5a0a2..25c4f80bd 100644 --- a/priam/src/main/java/com/netflix/priam/configSource/MemoryConfigSource.java +++ b/priam/src/main/java/com/netflix/priam/configSource/MemoryConfigSource.java @@ -17,7 +17,6 @@ package com.netflix.priam.configSource; import com.google.common.collect.Maps; - import java.util.Map; public final class MemoryConfigSource extends AbstractConfigSource { diff --git a/priam/src/main/java/com/netflix/priam/configSource/PriamConfigSource.java b/priam/src/main/java/com/netflix/priam/configSource/PriamConfigSource.java index a2d78c467..fcc6c6f35 100644 --- a/priam/src/main/java/com/netflix/priam/configSource/PriamConfigSource.java +++ b/priam/src/main/java/com/netflix/priam/configSource/PriamConfigSource.java @@ -16,27 +16,22 @@ */ package com.netflix.priam.configSource; -import com.netflix.priam.configSource.CompositeConfigSource; -import com.netflix.priam.configSource.PropertiesConfigSource; -import com.netflix.priam.configSource.SimpleDBConfigSource; -import com.netflix.priam.configSource.SystemPropertiesConfigSource; -import com.netflix.priam.configSource.IConfigSource; - import javax.inject.Inject; /** - * Default {@link IConfigSource} pulling in configs from SimpleDB, local Properties, and System Properties. + * Default {@link com.netflix.priam.configSource.IConfigSource} pulling in configs from SimpleDB, + * local Properties, and System Properties. */ public class PriamConfigSource extends CompositeConfigSource { @Inject - public PriamConfigSource(final SimpleDBConfigSource simpleDBConfigSource, - final PropertiesConfigSource propertiesConfigSource, - final SystemPropertiesConfigSource systemPropertiesConfigSource) { - // this order was based off PriamConfigurations loading. W/e loaded last could override, but with Composite, first + public PriamConfigSource( + final SimpleDBConfigSource simpleDBConfigSource, + final PropertiesConfigSource propertiesConfigSource, + final SystemPropertiesConfigSource systemPropertiesConfigSource) { + // this order was based off PriamConfigurations loading. W/e loaded last could override, + // but with Composite, first // has the highest priority. 
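For illustration only (an editorial sketch, not part of this patch): the first-source-wins lookup described in the comment above behaves as below. MemoryConfigSource is used because it is the simplest IConfigSource in this diff to construct by hand, and the key names are made up for the example.

import com.netflix.priam.configSource.CompositeConfigSource;
import com.netflix.priam.configSource.IConfigSource;
import com.netflix.priam.configSource.MemoryConfigSource;

class ConfigPrecedenceSketch {
    static String resolveClusterName() {
        IConfigSource highest = new MemoryConfigSource();
        IConfigSource fallback = new MemoryConfigSource();
        highest.set("priam.clustername", "cass_primary");
        fallback.set("priam.clustername", "ignored");
        fallback.set("priam.zone", "us-east-1a"); // only present in the fallback

        IConfigSource composite = new CompositeConfigSource(highest, fallback);
        composite.initialize("priam-test", "us-east-1");
        composite.get("priam.zone"); // "us-east-1a" -- falls through to the second source
        return composite.get("priam.clustername"); // "cass_primary" -- first source wins
    }
}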
- super(simpleDBConfigSource, - propertiesConfigSource, - systemPropertiesConfigSource); + super(simpleDBConfigSource, propertiesConfigSource, systemPropertiesConfigSource); } } diff --git a/priam/src/main/java/com/netflix/priam/configSource/PropertiesConfigSource.java b/priam/src/main/java/com/netflix/priam/configSource/PropertiesConfigSource.java index 6385fbecd..40ce0246f 100644 --- a/priam/src/main/java/com/netflix/priam/configSource/PropertiesConfigSource.java +++ b/priam/src/main/java/com/netflix/priam/configSource/PropertiesConfigSource.java @@ -16,25 +16,23 @@ */ package com.netflix.priam.configSource; +import static com.google.common.base.Preconditions.checkNotNull; + import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.base.Strings; import com.google.common.collect.Maps; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.io.IOException; import java.net.URL; import java.util.Map; import java.util.Properties; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -import static com.google.common.base.Preconditions.checkNotNull; - -/** - * Loads the 'Priam.properties' file as a source. - */ +/** Loads the 'Priam.properties' file as a source. */ public class PropertiesConfigSource extends AbstractConfigSource { - private static final Logger logger = LoggerFactory.getLogger(PropertiesConfigSource.class.getName()); + private static final Logger logger = + LoggerFactory.getLogger(PropertiesConfigSource.class.getName()); private static final String DEFAULT_PRIAM_PROPERTIES = "Priam.properties"; @@ -57,8 +55,8 @@ public PropertiesConfigSource(final Properties properties) { } @Override - public void intialize(final String asgName, final String region) { - super.intialize(asgName, region); + public void initialize(final String asgName, final String region) { + super.initialize(asgName, region); Properties properties = new Properties(); URL url = PropertiesConfigSource.class.getClassLoader().getResource(priamFile); if (url != null) { @@ -84,7 +82,6 @@ public void set(final String key, final String value) { data.put(key, value); } - @Override public int size() { return data.size(); @@ -96,7 +93,7 @@ public boolean contains(final String prop) { } /** - * Clones all the values from the properties. If the value is null, it will be ignored. + * Clones all the values from the properties. If the value is null, it will be ignored. * * @param properties to clone */ diff --git a/priam/src/main/java/com/netflix/priam/configSource/SimpleDBConfigSource.java b/priam/src/main/java/com/netflix/priam/configSource/SimpleDBConfigSource.java index 11d68ddd6..e836d7f02 100644 --- a/priam/src/main/java/com/netflix/priam/configSource/SimpleDBConfigSource.java +++ b/priam/src/main/java/com/netflix/priam/configSource/SimpleDBConfigSource.java @@ -25,29 +25,33 @@ import com.google.common.base.Preconditions; import com.google.common.collect.Maps; import com.netflix.priam.cred.ICredential; +import java.util.Iterator; +import java.util.Map; +import javax.inject.Inject; import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import javax.inject.Inject; -import java.util.Iterator; -import java.util.Map; - /** - * Loads config data from SimpleDB. {@link #intialize(String, String)} will query the SimpleDB domain "PriamProperties" - * for any potential configurations. 
The domain is set up to support multiple different clusters; this is done by using - * amazon's auto scaling groups (ASG). + * Loads config data from SimpleDB. {@link #initialize(String, String)} will query the SimpleDB + * domain "PriamProperties" for any potential configurations. The domain is set up to support + * multiple different clusters; this is done by using amazon's auto scaling groups (ASG). + * + *
<p>Schema
 *
- * Schema
- * {
- * "appId" // ASG up to first instance of '-'. So ASG name priam-test will create appId priam, ASG priam_test
- * will create appId priam_test.
- * "property" // key to use for configs.
- * "value" // value to set for the given property/key.
- * "region" // region the config belongs to. If left empty, then applies to all regions.
- * }
+ *
+ * {
+ * "appId" // ASG up to first instance of '-'. So ASG name priam-test will create appId priam,
+ * ASG priam_test will create appId priam_test.
+ * "property" // key to use for configs.
+ * "value" // value to set for the given property/key.
+ * "region" // region the config belongs to. If left empty, then applies to all regions.
+ * + * } */ public final class SimpleDBConfigSource extends AbstractConfigSource { - private static final Logger logger = LoggerFactory.getLogger(SimpleDBConfigSource.class.getName()); + private static final Logger logger = + LoggerFactory.getLogger(SimpleDBConfigSource.class.getName()); private static final String DOMAIN = "PriamProperties"; @@ -60,14 +64,18 @@ public SimpleDBConfigSource(final ICredential provider) { } @Override - public void intialize(final String asgName, final String region) { - super.intialize(asgName, region); + public void initialize(final String asgName, final String region) { + super.initialize(asgName, region); // End point is us-east-1 - AmazonSimpleDB simpleDBClient = AmazonSimpleDBClient.builder().withCredentials(provider.getAwsCredentialProvider()).build(); + AmazonSimpleDB simpleDBClient = + AmazonSimpleDBClient.builder() + .withCredentials(provider.getAwsCredentialProvider()) + .build(); String nextToken = null; - String appid = asgName.lastIndexOf('-') > 0 ? asgName.substring(0, asgName.indexOf('-')) : asgName; + String appid = + asgName.lastIndexOf('-') > 0 ? asgName.substring(0, asgName.indexOf('-')) : asgName; logger.info("appid used to fetch properties is: {}", appid); do { String ALL_QUERY = "select * from " + DOMAIN + " where " + Attributes.APP_ID + "='%s'"; @@ -75,19 +83,16 @@ public void intialize(final String asgName, final String region) { request.setNextToken(nextToken); SelectResult result = simpleDBClient.select(request); nextToken = result.getNextToken(); - Iterator itemiter = result.getItems().iterator(); - while (itemiter.hasNext()) - addProperty(itemiter.next()); + for (Item item : result.getItems()) addProperty(item); - } - while (nextToken != null); + } while (nextToken != null); } private static class Attributes { - public final static String APP_ID = "appId"; // ASG - public final static String PROPERTY = "property"; - public final static String PROPERTY_VALUE = "value"; - public final static String REGION = "region"; + public static final String APP_ID = "appId"; // ASG + public static final String PROPERTY = "property"; + public static final String PROPERTY_VALUE = "value"; + public static final String REGION = "region"; } private void addProperty(Item item) { @@ -97,19 +102,14 @@ private void addProperty(Item item) { String dc = ""; while (attrs.hasNext()) { Attribute att = attrs.next(); - if (att.getName().equals(Attributes.PROPERTY)) - prop = att.getValue(); - else if (att.getName().equals(Attributes.PROPERTY_VALUE)) - value = att.getValue(); - else if (att.getName().equals(Attributes.REGION)) - dc = att.getValue(); + if (att.getName().equals(Attributes.PROPERTY)) prop = att.getValue(); + else if (att.getName().equals(Attributes.PROPERTY_VALUE)) value = att.getValue(); + else if (att.getName().equals(Attributes.REGION)) dc = att.getValue(); } // Ignore, if not this region - if (StringUtils.isNotBlank(dc) && !dc.equals(getRegion())) - return; + if (StringUtils.isNotBlank(dc) && !dc.equals(getRegion())) return; // Override only if region is specified - if (data.containsKey(prop) && StringUtils.isBlank(dc)) - return; + if (data.containsKey(prop) && StringUtils.isBlank(dc)) return; data.put(prop, value); } diff --git a/priam/src/main/java/com/netflix/priam/configSource/SystemPropertiesConfigSource.java b/priam/src/main/java/com/netflix/priam/configSource/SystemPropertiesConfigSource.java index d1dd68bbc..d975bce42 100644 --- a/priam/src/main/java/com/netflix/priam/configSource/SystemPropertiesConfigSource.java +++ 
b/priam/src/main/java/com/netflix/priam/configSource/SystemPropertiesConfigSource.java @@ -19,15 +19,15 @@ import com.google.common.base.Preconditions; import com.google.common.collect.Maps; import com.netflix.priam.config.PriamConfiguration; - import java.util.Map; import java.util.Properties; /** * Loads {@link System#getProperties()} as a source. * - * Implementation note: {@link #set(String, String)} does not write to system properties, but will write to a new map. - * This means that setting values to this source has no effect on system properties or other instances of this class. + *
<p>
Implementation note: {@link #set(String, String)} does not write to system properties, but + * will write to a new map. This means that setting values to this source has no effect on system + * properties or other instances of this class. */ public final class SystemPropertiesConfigSource extends AbstractConfigSource { private static final String BLANK = ""; @@ -35,14 +35,13 @@ public final class SystemPropertiesConfigSource extends AbstractConfigSource { private final Map data = Maps.newConcurrentMap(); @Override - public void intialize(final String asgName, final String region) { - super.intialize(asgName, region); + public void initialize(final String asgName, final String region) { + super.initialize(asgName, region); Properties systemProps = System.getProperties(); for (final String key : systemProps.stringPropertyNames()) { - if (!key.startsWith(PriamConfiguration.PRIAM_PRE)) - continue; + if (!key.startsWith(PriamConfiguration.PRIAM_PRE)) continue; final String value = systemProps.getProperty(key); if (value != null && !BLANK.equals(value)) { data.put(key, value); diff --git a/priam/src/main/java/com/netflix/priam/connection/CassandraOperations.java b/priam/src/main/java/com/netflix/priam/connection/CassandraOperations.java new file mode 100644 index 000000000..79579de6c --- /dev/null +++ b/priam/src/main/java/com/netflix/priam/connection/CassandraOperations.java @@ -0,0 +1,205 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +package com.netflix.priam.connection; + +import com.netflix.priam.config.IConfiguration; +import com.netflix.priam.utils.RetryableCallable; +import java.util.*; +import javax.inject.Inject; +import org.apache.cassandra.db.ColumnFamilyStoreMBean; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** This class encapsulates interactions with Cassandra. Created by aagrawal on 6/19/18. */ +public class CassandraOperations implements ICassandraOperations { + private static final Logger logger = LoggerFactory.getLogger(CassandraOperations.class); + private final IConfiguration configuration; + + @Inject + CassandraOperations(IConfiguration configuration) { + this.configuration = configuration; + } + + @Override + public synchronized void takeSnapshot(final String snapshotName) throws Exception { + // Retry max of 6 times with 10 second in between (for one minute). This is to ensure that + // we overcome any temporary glitch. + // Note that operation MAY fail if cassandra successfully took the snapshot of certain + // columnfamily(ies) and we try to create snapshot with + // same name. It is a good practice to call clearSnapshot after this operation fails, to + // ensure we don't leave any left overs. + // Example scenario: Change of file permissions by manual intervention and C* unable to take + // snapshot of one CF. 
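// Editorial sketch (comments only, not part of the patch): the ICassandraOperations javadoc
// further down asks callers to make snapshot names unique, e.g. by appending a UUID, so a
// retried or overlapping run fails fast here instead of silently mixing files from two runs:
//
//     String tag = "priam_snapshot_" + UUID.randomUUID();  // hypothetical naming scheme
//     cassandraOperations.takeSnapshot(tag);               // retries, clears tag on failure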
+ try { + new RetryableCallable(6, 10000) { + public Void retriableCall() throws Exception { + JMXNodeTool nodetool = JMXNodeTool.instance(configuration); + nodetool.takeSnapshot(snapshotName, null, Collections.emptyMap()); + return null; + } + }.call(); + } catch (Exception e) { + logger.error( + "Error while taking snapshot {}. Asking Cassandra to clear snapshot to avoid accumulation of snapshots.", + snapshotName); + clearSnapshot(snapshotName); + throw e; + } + } + + @Override + public void clearSnapshot(final String snapshotTag) throws Exception { + new RetryableCallable() { + public Void retriableCall() throws Exception { + JMXNodeTool nodetool = JMXNodeTool.instance(configuration); + nodetool.clearSnapshot(snapshotTag); + return null; + } + }.call(); + } + + @Override + public List getKeyspaces() throws Exception { + return new RetryableCallable>() { + public List retriableCall() throws Exception { + try (JMXNodeTool nodeTool = JMXNodeTool.instance(configuration)) { + return nodeTool.getKeyspaces(); + } + } + }.call(); + } + + @Override + public Map> getColumnfamilies() throws Exception { + return new RetryableCallable>>() { + public Map> retriableCall() throws Exception { + try (JMXNodeTool nodeTool = JMXNodeTool.instance(configuration)) { + final Map> columnfamilies = new HashMap<>(); + Iterator> columnfamilyStoreMBean = + nodeTool.getColumnFamilyStoreMBeanProxies(); + columnfamilyStoreMBean.forEachRemaining( + entry -> { + columnfamilies.putIfAbsent(entry.getKey(), new ArrayList<>()); + columnfamilies + .get(entry.getKey()) + .add(entry.getValue().getColumnFamilyName()); + }); + return columnfamilies; + } + } + }.call(); + } + + @Override + public void forceKeyspaceCompaction(String keyspaceName, String... columnfamilies) + throws Exception { + new RetryableCallable() { + public Void retriableCall() throws Exception { + try (JMXNodeTool nodeTool = JMXNodeTool.instance(configuration)) { + nodeTool.forceKeyspaceCompaction(false, keyspaceName, columnfamilies); + return null; + } + } + }.call(); + } + + @Override + public void forceKeyspaceFlush(String keyspaceName) throws Exception { + new RetryableCallable() { + public Void retriableCall() throws Exception { + try (JMXNodeTool nodeTool = JMXNodeTool.instance(configuration)) { + nodeTool.forceKeyspaceFlush(keyspaceName); + return null; + } + } + }.call(); + } + + @Override + public List> gossipInfo() throws Exception { + List> returnPublicIpSourceIpMap = new ArrayList(); + try { + JMXNodeTool nodeTool; + try { + nodeTool = JMXNodeTool.instance(configuration); + } catch (JMXConnectionException e) { + logger.error( + "Exception in fetching c* jmx tool . 
Msg: {}", e.getLocalizedMessage(), e); + throw e; + } + String gossipInfoLines[] = nodeTool.getGossipInfo(false).split("/"); + Arrays.stream(gossipInfoLines) + .forEach( + gossipInfoLine -> { + Map gossipMap = new HashMap<>(); + String gossipInfoSubLines[] = gossipInfoLine.split("\\r?\\n"); + if (gossipInfoSubLines.length + > 2) // Random check for existence of some lines + { + gossipMap.put("PUBLIC_IP", gossipInfoSubLines[0].trim()); + if (gossipMap.get("PUBLIC_IP") != null) { + returnPublicIpSourceIpMap.add(gossipMap); + } + + for (String gossipInfoSubLine : gossipInfoSubLines) { + String gossipLineEntry[] = gossipInfoSubLine.split(":"); + if (gossipLineEntry.length == 2) { + gossipMap.put( + gossipLineEntry[0].trim().toUpperCase(), + gossipLineEntry[1].trim()); + } else if (gossipLineEntry.length == 3) { + if (gossipLineEntry[0] + .trim() + .equalsIgnoreCase("STATUS")) { + // Special handling for STATUS as C* puts first + // token in STATUS or "true". + gossipMap.put( + gossipLineEntry[0].trim().toUpperCase(), + gossipLineEntry[2].split(",")[0].trim()); + } else if (gossipLineEntry[0] + .trim() + .equalsIgnoreCase("TOKENS")) { + // Special handling for tokens as it is always + // "hidden". + try { + gossipMap.put( + gossipLineEntry[0].trim().toUpperCase(), + nodeTool.getTokens( + gossipMap.get( + "PUBLIC_IP")) + .toString()); + } catch (Exception e) { + logger.warn( + "Unable to find TOKEN(s) for the IP: {}", + gossipMap.get("PUBLIC_IP")); + } + } else { + gossipMap.put( + gossipLineEntry[0].trim().toUpperCase(), + gossipLineEntry[2].trim()); + } + } + } + } + }); + + } catch (Exception e) { + logger.error("Unable to parse nodetool gossipinfo output from Cassandra.", e); + } + return returnPublicIpSourceIpMap; + } +} diff --git a/priam/src/main/java/com/netflix/priam/connection/ICassandraOperations.java b/priam/src/main/java/com/netflix/priam/connection/ICassandraOperations.java new file mode 100644 index 000000000..ae0c97201 --- /dev/null +++ b/priam/src/main/java/com/netflix/priam/connection/ICassandraOperations.java @@ -0,0 +1,64 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.priam.connection; + +import java.util.List; +import java.util.Map; + +/** Created by aagrawal on 2/16/19. */ +public interface ICassandraOperations { + + /** + * This method neds to be synchronized. Context: During the transition phase to backup version + * 2.0, we might be executing multiple snapshots at the same time. To avoid, unknown behavior by + * Cassandra, it is wise to keep this method sync. Also, with backups being on CRON, we don't + * know how often operator is taking snapshot. + * + * @param snapshotName Name of the snapshot on disk. This snapshotName should be UNIQUE among + * all the snapshots. Try to append UUID to snapshotName to ensure uniqueness. This is to + * ensure a) Snapshot fails if name are not unique. b) You might take snapshots which are + * not "part" of same snapshot. e.g. 
Any leftovers from previous operation. c) Once snapshot + * fails, this will clean the failed snapshot. + * @throws Exception in case of error while taking a snapshot by Cassandra. + */ + void takeSnapshot(final String snapshotName) throws Exception; + + /** + * Clear the snapshot tag from disk. + * + * @param snapshotTag Name of the snapshot to be removed. + * @throws Exception in case of error while clearing a snapshot. + */ + void clearSnapshot(final String snapshotTag) throws Exception; + + /** + * Get all the keyspaces existing on this node. + * + * @return List of keyspace names. + * @throws Exception in case of reaching to JMX endpoint. + */ + List getKeyspaces() throws Exception; + + Map> getColumnfamilies() throws Exception; + + void forceKeyspaceCompaction(String keyspaceName, String... columnfamilies) throws Exception; + + void forceKeyspaceFlush(String keyspaceName) throws Exception; + + List> gossipInfo() throws Exception; +} diff --git a/priam/src/main/java/com/netflix/priam/utils/INodeToolObservable.java b/priam/src/main/java/com/netflix/priam/connection/INodeToolObservable.java similarity index 89% rename from priam/src/main/java/com/netflix/priam/utils/INodeToolObservable.java rename to priam/src/main/java/com/netflix/priam/connection/INodeToolObservable.java index 5e70530f4..f299af625 100644 --- a/priam/src/main/java/com/netflix/priam/utils/INodeToolObservable.java +++ b/priam/src/main/java/com/netflix/priam/connection/INodeToolObservable.java @@ -1,19 +1,20 @@ -/** - * Copyright 2017 Netflix, Inc. - *
<p>
+/* + * Copyright 2019 Netflix, Inc. + * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at - *
<p>
+ * * http://www.apache.org/licenses/LICENSE-2.0 - *
<p>
+ * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. + * */ -package com.netflix.priam.utils; +package com.netflix.priam.connection; public interface INodeToolObservable { /* diff --git a/priam/src/main/java/com/netflix/priam/utils/INodeToolObserver.java b/priam/src/main/java/com/netflix/priam/connection/INodeToolObserver.java similarity index 89% rename from priam/src/main/java/com/netflix/priam/utils/INodeToolObserver.java rename to priam/src/main/java/com/netflix/priam/connection/INodeToolObserver.java index 154b051ce..bd20c0044 100644 --- a/priam/src/main/java/com/netflix/priam/utils/INodeToolObserver.java +++ b/priam/src/main/java/com/netflix/priam/connection/INodeToolObserver.java @@ -1,19 +1,20 @@ -/** - * Copyright 2017 Netflix, Inc. - *
<p>
+/* + * Copyright 2019 Netflix, Inc. + * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at - *
<p>
+ * * http://www.apache.org/licenses/LICENSE-2.0 - *
<p>
+ * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. + * */ -package com.netflix.priam.utils; +package com.netflix.priam.connection; import org.apache.cassandra.tools.NodeProbe; diff --git a/priam/src/main/java/com/netflix/priam/utils/JMXConnectionException.java b/priam/src/main/java/com/netflix/priam/connection/JMXConnectionException.java similarity index 90% rename from priam/src/main/java/com/netflix/priam/utils/JMXConnectionException.java rename to priam/src/main/java/com/netflix/priam/connection/JMXConnectionException.java index e20faab8e..f815eb722 100644 --- a/priam/src/main/java/com/netflix/priam/utils/JMXConnectionException.java +++ b/priam/src/main/java/com/netflix/priam/connection/JMXConnectionException.java @@ -1,19 +1,20 @@ -/** - * Copyright 2017 Netflix, Inc. - *
<p>
+/* + * Copyright 2019 Netflix, Inc. + * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at - *
<p>
+ * * http://www.apache.org/licenses/LICENSE-2.0 - *
<p>
+ * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. + * */ -package com.netflix.priam.utils; +package com.netflix.priam.connection; import java.io.IOException; @@ -28,5 +29,4 @@ public JMXConnectionException(String message) { public JMXConnectionException(String message, Exception e) { super(message, e); } - } diff --git a/priam/src/main/java/com/netflix/priam/connection/JMXNodeTool.java b/priam/src/main/java/com/netflix/priam/connection/JMXNodeTool.java new file mode 100644 index 000000000..76d940e7d --- /dev/null +++ b/priam/src/main/java/com/netflix/priam/connection/JMXNodeTool.java @@ -0,0 +1,438 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +package com.netflix.priam.connection; + +import com.google.inject.Inject; +import com.google.inject.Singleton; +import com.netflix.priam.config.IConfiguration; +import com.netflix.priam.health.CassandraMonitor; +import com.netflix.priam.utils.BoundedExponentialRetryCallable; +import java.io.IOException; +import java.io.PrintStream; +import java.lang.management.ManagementFactory; +import java.lang.management.MemoryUsage; +import java.lang.reflect.Field; +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.text.DecimalFormat; +import java.util.*; +import java.util.Map.Entry; +import java.util.concurrent.ExecutionException; +import javax.management.JMX; +import javax.management.MBeanServerConnection; +import javax.management.MalformedObjectNameException; +import javax.management.ObjectName; +import org.apache.cassandra.db.ColumnFamilyStoreMBean; +import org.apache.cassandra.repair.messages.RepairOption; +import org.apache.cassandra.tools.NodeProbe; +import org.codehaus.jettison.json.JSONArray; +import org.codehaus.jettison.json.JSONException; +import org.codehaus.jettison.json.JSONObject; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** Class to get data out of Cassandra JMX */ +@Singleton +public class JMXNodeTool extends NodeProbe implements INodeToolObservable { + private static final Logger logger = LoggerFactory.getLogger(JMXNodeTool.class); + private static volatile JMXNodeTool tool = null; + private MBeanServerConnection mbeanServerConn = null; + + private static final Set observers = new HashSet<>(); + + /** + * Hostname and Port to talk to will be same server for now optionally we might want the ip to + * poll. + * + *
<p>
NOTE: This class shouldn't be a singleton and this shouldn't be cached. + * + *
<p>
This will work only if cassandra runs. + */ + public JMXNodeTool(String host, int port) throws IOException, InterruptedException { + super(host, port); + } + + public JMXNodeTool(String host, int port, String username, String password) + throws IOException, InterruptedException { + super(host, port, username, password); + } + + @Inject + public JMXNodeTool(IConfiguration config) throws IOException, InterruptedException { + super("localhost", config.getJmxPort()); + } + + /** + * try to create if it is null. + * + * @throws JMXConnectionException + */ + public static JMXNodeTool instance(IConfiguration config) throws JMXConnectionException { + if (!testConnection()) tool = connect(config); + return tool; + } + + public static T getRemoteBean( + Class clazz, String mbeanName, IConfiguration config, boolean mxbean) + throws IOException, MalformedObjectNameException { + if (mxbean) + return ManagementFactory.newPlatformMXBeanProxy( + JMXNodeTool.instance(config).mbeanServerConn, mbeanName, clazz); + else + return JMX.newMBeanProxy( + JMXNodeTool.instance(config).mbeanServerConn, new ObjectName(mbeanName), clazz); + } + + /** + * This method will test if you can connect and query something before handing over the + * connection, This is required for our retry logic. + * + * @return + */ + private static boolean testConnection() { + // connecting first time hence return false. + if (tool == null) return false; + + try { + MBeanServerConnection serverConn = tool.mbeanServerConn; + if (serverConn == null) { + logger.info( + "Test connection to remove MBean server failed as there is no connection."); + return false; + } + + if (serverConn.getMBeanCount() + < 1) { // If C* is up, it should have at multiple MBeans registered. + logger.info( + "Test connection to remove MBean server failed as there is no registered MBeans."); + return false; + } + } catch (Throwable ex) { + closeQuietly(tool); + logger.error( + "Exception while checking JMX connection to C*, msg: {}", + ex.getLocalizedMessage()); + return false; + } + return true; + } + + private static void closeQuietly(JMXNodeTool tool) { + try { + tool.close(); + } catch (Exception e) { + logger.warn("failed to close jmx node tool", e); + } + } + + /** + * A means to clean up existing and recreate the JMX connection to the Cassandra process. + * + * @return the new connection. + */ + public static synchronized JMXNodeTool createNewConnection(final IConfiguration config) + throws JMXConnectionException { + return createConnection(config); + } + + public static synchronized JMXNodeTool connect(final IConfiguration config) + throws JMXConnectionException { + // lets make sure some other monitor didn't sneak in the recreated the connection already + if (!testConnection()) { + + if (tool != null) { + try { + tool.close(); // Ensure we properly close any existing (even if it's + // corrupted) connection to the remote jmx agent + } catch (IOException e) { + logger.warn( + "Exception performing house cleaning -- closing current connection to jmx remote agent. Msg: {}", + e.getLocalizedMessage(), + e); + } + } + + } else { + // Someone beat you and already created the connection, nothing you need to do.. 
+ return tool; + } + + return createConnection(config); + } + + private static JMXNodeTool createConnection(final IConfiguration config) + throws JMXConnectionException { + // If Cassandra is started then only start the monitoring + if (!CassandraMonitor.hasCassadraStarted()) { + String exceptionMsg = + "Cannot perform connection to remove jmx agent as Cassandra has not yet started, check back again later"; + logger.debug(exceptionMsg); + throw new JMXConnectionException(exceptionMsg); + } + + if (tool + != null) { // lets make sure we properly close any existing (even if it's corrupted) + // connection to the remote jmx agent + try { + tool.close(); + } catch (IOException e) { + logger.warn( + "Exception performing house cleaning -- closing current connection to jmx remote agent. Msg: {}", + e.getLocalizedMessage(), + e); + } + } + + try { + + tool = + new BoundedExponentialRetryCallable() { + @Override + public JMXNodeTool retriableCall() throws Exception { + JMXNodeTool nodetool; + if ((config.getJmxUsername() == null + || config.getJmxUsername().isEmpty()) + && (config.getJmxPassword() == null + || config.getJmxPassword().isEmpty())) { + nodetool = new JMXNodeTool("localhost", config.getJmxPort()); + } else { + nodetool = + new JMXNodeTool( + "localhost", + config.getJmxPort(), + config.getJmxUsername(), + config.getJmxPassword()); + } + + Field fields[] = NodeProbe.class.getDeclaredFields(); + for (Field field : fields) { + if (!field.getName().equals("mbeanServerConn")) continue; + field.setAccessible(true); + nodetool.mbeanServerConn = + (MBeanServerConnection) field.get(nodetool); + } + + return nodetool; + } + }.call(); + + } catch (Exception e) { + logger.error(e.getMessage(), e); + throw new JMXConnectionException(e.getMessage()); + } + + logger.info("Connected to remote jmx agent, will notify interested parties!"); + for (INodeToolObserver observer : observers) { + observer.nodeToolHasChanged(tool); + } + + return tool; + } + + /** + * You must do the compaction before running this to remove the duplicate tokens out of the + * server. TODO code it. 
+ */ + public JSONObject estimateKeys() throws JSONException { + Iterator> it = + super.getColumnFamilyStoreMBeanProxies(); + JSONObject object = new JSONObject(); + while (it.hasNext()) { + Entry entry = it.next(); + object.put("keyspace", entry.getKey()); + object.put("column_family", entry.getValue().getColumnFamilyName()); + object.put("estimated_size", entry.getValue().estimateKeys()); + } + return object; + } + + public JSONObject info() throws JSONException { + JSONObject object = new JSONObject(); + object.put("gossip_active", isInitialized()); + object.put("native_active", isNativeTransportRunning()); + object.put("token", getTokens().toString()); + object.put("load", getLoadString()); + object.put("generation_no", getCurrentGenerationNumber()); + object.put("uptime", getUptime() / 1000); + MemoryUsage heapUsage = getHeapMemoryUsage(); + double memUsed = (double) heapUsage.getUsed() / (1024 * 1024); + double memMax = (double) heapUsage.getMax() / (1024 * 1024); + object.put("heap_memory_mb", memUsed + "/" + memMax); + object.put("data_center", getDataCenter()); + object.put("rack", getRack()); + return object; + } + + public JSONObject statusInformation() throws JSONException { + JSONObject jsonObject = new JSONObject(); + jsonObject.put("live", getLiveNodes(false)); + jsonObject.put("unreachable", getUnreachableNodes(false)); + jsonObject.put("joining", getJoiningNodes(false)); + jsonObject.put("leaving", getLeavingNodes(false)); + jsonObject.put("moving", getMovingNodes(false)); + jsonObject.put("tokenToEndpointMap", getTokenToEndpointMap(false)); + return jsonObject; + } + + public JSONArray ring(String keyspace) throws JSONException { + JSONArray ring = new JSONArray(); + Map tokenToEndpoint = getTokenToEndpointMap(false); + List sortedTokens = new ArrayList<>(tokenToEndpoint.keySet()); + + Collection liveNodes = getLiveNodes(false); + Collection deadNodes = getUnreachableNodes(false); + Collection joiningNodes = getJoiningNodes(false); + Collection leavingNodes = getLeavingNodes(false); + Collection movingNodes = getMovingNodes(false); + Map loadMap = getLoadMap(false); + + String format = "%-16s%-12s%-12s%-7s%-8s%-16s%-20s%-44s%n"; + + // Calculate per-token ownership of the ring + Map ownerships; + try { + ownerships = effectiveOwnership(keyspace); + } catch (IllegalStateException ex) { + ownerships = getOwnership(); + } + + for (String token : sortedTokens) { + String primaryEndpoint = tokenToEndpoint.get(token); + String dataCenter; + try { + dataCenter = getEndpointSnitchInfoProxy().getDatacenter(primaryEndpoint); + } catch (UnknownHostException e) { + dataCenter = "Unknown"; + } + String rack; + try { + rack = getEndpointSnitchInfoProxy().getRack(primaryEndpoint); + } catch (UnknownHostException e) { + rack = "Unknown"; + } + String status = + liveNodes.contains(primaryEndpoint) + ? "Up" + : deadNodes.contains(primaryEndpoint) ? "Down" : "?"; + + String state = "Normal"; + + if (joiningNodes.contains(primaryEndpoint)) state = "Joining"; + else if (leavingNodes.contains(primaryEndpoint)) state = "Leaving"; + else if (movingNodes.contains(primaryEndpoint)) state = "Moving"; + + String load = loadMap.getOrDefault(primaryEndpoint, "?"); + String owns = + new DecimalFormat("##0.00%") + .format(ownerships.get(token) == null ? 
0.0F : ownerships.get(token)); + ring.put( + createJson( + primaryEndpoint, dataCenter, rack, status, state, load, owns, token)); + } + return ring; + } + + private JSONObject createJson( + String primaryEndpoint, + String dataCenter, + String rack, + String status, + String state, + String load, + String owns, + String token) + throws JSONException { + JSONObject object = new JSONObject(); + object.put("endpoint", primaryEndpoint); + object.put("dc", dataCenter); + object.put("rack", rack); + object.put("status", status); + object.put("state", state); + object.put("load", load); + object.put("owns", owns); + object.put("token", token); + return object; + } + + public void repair(boolean isSequential, boolean localDataCenterOnly) + throws IOException, ExecutionException, InterruptedException { + repair(isSequential, localDataCenterOnly, false); + } + + public void repair(boolean isSequential, boolean localDataCenterOnly, boolean primaryRange) + throws IOException, ExecutionException, InterruptedException { + Map repairOptions = new HashMap<>(); + repairOptions.put(RepairOption.PARALLELISM_KEY, Boolean.toString(!isSequential)); + repairOptions.put(RepairOption.PRIMARY_RANGE_KEY, Boolean.toString(primaryRange)); + if (localDataCenterOnly) repairOptions.put(RepairOption.DATACENTERS_KEY, getDataCenter()); + + PrintStream printStream = new PrintStream("repair.log"); + + for (String keyspace : getKeyspaces()) repairAsync(printStream, keyspace, repairOptions); + } + + public void cleanup() throws IOException, ExecutionException, InterruptedException { + for (String keyspace : getKeyspaces()) forceKeyspaceCleanup(0, keyspace); + } + + public void setIncrementalBackupsEnabled(boolean enabled) { + super.setIncrementalBackupsEnabled(enabled); + } + + public boolean isIncrementalBackupsEnabled() { + return super.isIncrementalBackupsEnabled(); + } + + public void refresh(List keyspaces) + throws IOException, ExecutionException, InterruptedException { + Iterator> it = + super.getColumnFamilyStoreMBeanProxies(); + while (it.hasNext()) { + Entry entry = it.next(); + if (keyspaces.contains(entry.getKey())) { + logger.info( + "Refreshing {} {}", entry.getKey(), entry.getValue().getColumnFamilyName()); + loadNewSSTables(entry.getKey(), entry.getValue().getColumnFamilyName()); + } + } + } + + @Override + public void close() throws IOException { + synchronized (JMXNodeTool.class) { + tool = null; + super.close(); + } + } + + /** @param observer to add to list of internal observers. This behavior is thread-safe. */ + @Override + public void addObserver(INodeToolObserver observer) { + if (observer == null) throw new NullPointerException("Cannot not observer."); + synchronized (observers) { + observers.add(observer); // if observer exist, it's a noop + } + } + + /** @param observer to be removed; behavior is thread-safe. 
*/ + @Override + public void deleteObserver(INodeToolObserver observer) { + synchronized (observers) { + observers.remove(observer); + } + } +} diff --git a/priam/src/main/java/com/netflix/priam/cred/ClearCredential.java b/priam/src/main/java/com/netflix/priam/cred/ClearCredential.java index ccfec2fc9..3d3a0ea6a 100644 --- a/priam/src/main/java/com/netflix/priam/cred/ClearCredential.java +++ b/priam/src/main/java/com/netflix/priam/cred/ClearCredential.java @@ -19,20 +19,17 @@ import com.amazonaws.auth.AWSCredentials; import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.auth.BasicAWSCredentials; +import java.io.FileInputStream; +import java.util.Properties; import org.apache.cassandra.io.util.FileUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.FileInputStream; -import java.util.Properties; - /** - * This is a basic implementation of ICredentials. User should prefer to - * implement their own versions for more secured access. This class requires - * clear AWS key and access. - * - * Set the following properties in "conf/awscredntial.properties" + * This is a basic implementation of ICredentials. User should prefer to implement their own + * versions for more secured access. This class requires clear AWS key and access. * + *
<p>
Set the following properties in "conf/awscredntial.properties" */ public class ClearCredential implements ICredential { private static final Logger logger = LoggerFactory.getLogger(ClearCredential.class); @@ -46,7 +43,10 @@ public ClearCredential() { fis = new FileInputStream(CRED_FILE); final Properties props = new Properties(); props.load(fis); - AWS_ACCESS_ID = props.getProperty("AWSACCESSID") != null ? props.getProperty("AWSACCESSID").trim() : ""; + AWS_ACCESS_ID = + props.getProperty("AWSACCESSID") != null + ? props.getProperty("AWSACCESSID").trim() + : ""; AWS_KEY = props.getProperty("AWSKEY") != null ? props.getProperty("AWSKEY").trim() : ""; } catch (Exception e) { logger.error("Exception with credential file ", e); diff --git a/priam/src/main/java/com/netflix/priam/cred/ICredential.java b/priam/src/main/java/com/netflix/priam/cred/ICredential.java index 9cdb79878..127ef1c48 100644 --- a/priam/src/main/java/com/netflix/priam/cred/ICredential.java +++ b/priam/src/main/java/com/netflix/priam/cred/ICredential.java @@ -19,14 +19,9 @@ import com.amazonaws.auth.AWSCredentialsProvider; import com.google.inject.ImplementedBy; -/** - * Credential file interface for services supporting - * Access ID and key authentication - */ +/** Credential file interface for services supporting Access ID and key authentication */ @ImplementedBy(ClearCredential.class) public interface ICredential { - /** - * @return AWS Credential Provider object - */ + /** @return AWS Credential Provider object */ AWSCredentialsProvider getAwsCredentialProvider(); } diff --git a/priam/src/main/java/com/netflix/priam/cred/ICredentialGeneric.java b/priam/src/main/java/com/netflix/priam/cred/ICredentialGeneric.java index d4975ad48..59893385d 100755 --- a/priam/src/main/java/com/netflix/priam/cred/ICredentialGeneric.java +++ b/priam/src/main/java/com/netflix/priam/cred/ICredentialGeneric.java @@ -17,14 +17,16 @@ package com.netflix.priam.cred; /** - * Credential file interface for services supporting - * Access ID and key authentication for non-AWS + * Credential file interface for services supporting Access ID and key authentication for non-AWS */ public interface ICredentialGeneric extends ICredential { byte[] getValue(KEY key); enum KEY { - PGP_PUBLIC_KEY_LOC, PGP_PASSWORD, GCS_SERVICE_ID, GCS_PRIVATE_KEY_LOC + PGP_PUBLIC_KEY_LOC, + PGP_PASSWORD, + GCS_SERVICE_ID, + GCS_PRIVATE_KEY_LOC } } diff --git a/priam/src/main/java/com/netflix/priam/cryptography/CryptographyAlgorithm.java b/priam/src/main/java/com/netflix/priam/cryptography/CryptographyAlgorithm.java new file mode 100644 index 000000000..04940286b --- /dev/null +++ b/priam/src/main/java/com/netflix/priam/cryptography/CryptographyAlgorithm.java @@ -0,0 +1,6 @@ +package com.netflix.priam.cryptography; + +public enum CryptographyAlgorithm { + PLAINTEXT, + PGP +} diff --git a/priam/src/main/java/com/netflix/priam/cryptography/IFileCryptography.java b/priam/src/main/java/com/netflix/priam/cryptography/IFileCryptography.java index 894e44ed7..3e8d0d58b 100755 --- a/priam/src/main/java/com/netflix/priam/cryptography/IFileCryptography.java +++ b/priam/src/main/java/com/netflix/priam/cryptography/IFileCryptography.java @@ -1,16 +1,14 @@ /** * Copyright 2017 Netflix, Inc. - *
<p>
- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
<p>
- * http://www.apache.org/licenses/LICENSE-2.0 - *
<p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and + * + *
<p>
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + *
<p>
http://www.apache.org/licenses/LICENSE-2.0 + * + *
<p>
Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.priam.cryptography; @@ -23,7 +21,8 @@ public interface IFileCryptography { /** * @param in - a handle to the encrypted, compressed data stream * @param passwd - pass phrase used to extract the PGP private key from the encrypted content. - * @param objectName - name of the object we are decrypting, currently use for debugging purposes only. + * @param objectName - name of the object we are decrypting, currently use for debugging + * purposes only. * @return a handle to the decrypted, uncompress data stream. */ InputStream decryptStream(InputStream in, char[] passwd, String objectName) throws Exception; @@ -33,6 +32,4 @@ public interface IFileCryptography { * @return - an iterate of the ciphertext stream */ Iterator encryptStream(InputStream is, String fileName) throws Exception; - - -} \ No newline at end of file +} diff --git a/priam/src/main/java/com/netflix/priam/cryptography/pgp/PgpCredential.java b/priam/src/main/java/com/netflix/priam/cryptography/pgp/PgpCredential.java index 174de3cd0..cc5675ec5 100755 --- a/priam/src/main/java/com/netflix/priam/cryptography/pgp/PgpCredential.java +++ b/priam/src/main/java/com/netflix/priam/cryptography/pgp/PgpCredential.java @@ -1,16 +1,14 @@ /** * Copyright 2017 Netflix, Inc. - *
<p>
- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
<p>
- * http://www.apache.org/licenses/LICENSE-2.0 - *
<p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and + * + *
<p>
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + *
<p>
http://www.apache.org/licenses/LICENSE-2.0 + * + *
<p>
Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.priam.cryptography.pgp; @@ -22,11 +20,11 @@ /* * A generic implementation of fetch keys as plaintext. The key values are used within PGP cryptography algorithm. Users may - * want to provide an implementation where your key(s)' value is decrypted using AES encryption algorithm. + * want to provide an implementation where your key(s)' value is decrypted using AES encryption algorithm. */ public class PgpCredential implements ICredentialGeneric { - private IConfiguration config; + private final IConfiguration config; @Inject public PgpCredential(IConfiguration config) { @@ -51,8 +49,5 @@ public byte[] getValue(KEY key) { } else { throw new IllegalArgumentException("Key value not supported."); } - } - - } diff --git a/priam/src/main/java/com/netflix/priam/cryptography/pgp/PgpCryptography.java b/priam/src/main/java/com/netflix/priam/cryptography/pgp/PgpCryptography.java index 26e30ed8f..4f1289aed 100755 --- a/priam/src/main/java/com/netflix/priam/cryptography/pgp/PgpCryptography.java +++ b/priam/src/main/java/com/netflix/priam/cryptography/pgp/PgpCryptography.java @@ -1,16 +1,14 @@ /** * Copyright 2017 Netflix, Inc. - *
<p>
- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
<p>
- * http://www.apache.org/licenses/LICENSE-2.0 - *
<p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and + * + *
<p>
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + *
<p>
http://www.apache.org/licenses/LICENSE-2.0 + * + *
<p>
Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.priam.cryptography.pgp; @@ -18,18 +16,17 @@ import com.google.inject.Inject; import com.netflix.priam.config.IConfiguration; import com.netflix.priam.cryptography.IFileCryptography; -import org.apache.commons.io.IOUtils; -import org.bouncycastle.jce.provider.BouncyCastleProvider; -import org.bouncycastle.openpgp.*; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.io.*; import java.security.NoSuchProviderException; import java.security.SecureRandom; import java.security.Security; import java.util.Date; import java.util.Iterator; +import org.apache.commons.io.IOUtils; +import org.bouncycastle.jce.provider.BouncyCastleProvider; +import org.bouncycastle.openpgp.*; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class PgpCryptography implements IFileCryptography { private static final Logger logger = LoggerFactory.getLogger(PgpCryptography.class); @@ -37,15 +34,14 @@ public class PgpCryptography implements IFileCryptography { private IConfiguration config; static { - Security.addProvider(new BouncyCastleProvider()); //tell the JVM the security provider is PGP + // tell the JVM the security provider is PGP + Security.addProvider(new BouncyCastleProvider()); } @Inject public PgpCryptography(IConfiguration config) { this.config = config; - - } private PGPSecretKeyRingCollection getPgpSecurityCollection() { @@ -54,7 +50,8 @@ private PGPSecretKeyRingCollection getPgpSecurityCollection() { try { keyIn = new BufferedInputStream(new FileInputStream(config.getPrivateKeyLocation())); } catch (FileNotFoundException e) { - throw new IllegalStateException("PGP private key file not found. file: " + config.getPrivateKeyLocation()); + throw new IllegalStateException( + "PGP private key file not found. file: " + config.getPrivateKeyLocation()); } try { @@ -62,10 +59,11 @@ private PGPSecretKeyRingCollection getPgpSecurityCollection() { return new PGPSecretKeyRingCollection(PGPUtil.getDecoderStream(keyIn)); } catch (Exception e) { - logger.error("Exception in reading PGP security collection ring. Msg: {}", e.getLocalizedMessage()); + logger.error( + "Exception in reading PGP security collection ring. Msg: {}", + e.getLocalizedMessage()); throw new IllegalStateException("Exception in reading PGP security collection ring", e); } - } private PGPPublicKey getPubKey() { @@ -74,7 +72,9 @@ private PGPPublicKey getPubKey() { pubKeyIS = new BufferedInputStream(new FileInputStream(config.getPgpPublicKeyLoc())); } catch (FileNotFoundException e) { - logger.error("Exception in reading PGP security collection ring. Msg: {}", e.getLocalizedMessage()); + logger.error( + "Exception in reading PGP security collection ring. Msg: {}", + e.getLocalizedMessage()); throw new RuntimeException("Exception in reading PGP public key", e); } @@ -94,59 +94,84 @@ private PGPPublicKey getPubKey() { * @return a handle to the decrypted, uncompress data stream. 
*/ @Override - public InputStream decryptStream(InputStream in, char[] passwd, String objectName) throws Exception { + public InputStream decryptStream(InputStream in, char[] passwd, String objectName) + throws Exception { logger.info("Start to decrypt object: {}", objectName); in = PGPUtil.getDecoderStream(in); - PGPObjectFactory inPgpReader = new PGPObjectFactory(in); //general class for reading a stream of data. + // general class for reading a stream of data. + PGPObjectFactory inPgpReader = new PGPObjectFactory(in); Object o = inPgpReader.nextObject(); PGPEncryptedDataList encryptedDataList; // the first object might be a PGP marker packet. - if (o instanceof PGPEncryptedDataList) - encryptedDataList = (PGPEncryptedDataList) o; + if (o instanceof PGPEncryptedDataList) encryptedDataList = (PGPEncryptedDataList) o; else - encryptedDataList = (PGPEncryptedDataList) inPgpReader.nextObject(); //first object was a marker, the real data is the next one. + // first object was a marker, the real data is the next one. + encryptedDataList = (PGPEncryptedDataList) inPgpReader.nextObject(); - Iterator encryptedDataIterator = encryptedDataList.getEncryptedDataObjects(); //get the iterator so we can iterate through all the encrypted data. + // get the iterator so we can iterate through all the encrypted data. + Iterator encryptedDataIterator = encryptedDataList.getEncryptedDataObjects(); - PGPPrivateKey privateKey = null; //to be use for decryption - PGPPublicKeyEncryptedData encryptedDataStreamHandle = null; //a handle to the encrypted data stream + // to be use for decryption + PGPPrivateKey privateKey = null; + // a handle to the encrypted data stream + PGPPublicKeyEncryptedData encryptedDataStreamHandle = null; while (privateKey == null && encryptedDataIterator.hasNext()) { - encryptedDataStreamHandle = (PGPPublicKeyEncryptedData) encryptedDataIterator.next(); //a handle to the encrypted data stream + // a handle to the encrypted data stream + encryptedDataStreamHandle = (PGPPublicKeyEncryptedData) encryptedDataIterator.next(); try { - privateKey = findSecretKey(getPgpSecurityCollection(), encryptedDataStreamHandle.getKeyID(), passwd); + privateKey = + findSecretKey( + getPgpSecurityCollection(), + encryptedDataStreamHandle.getKeyID(), + passwd); } catch (Exception ex) { - throw new IllegalStateException("decryption exception: object: " + objectName + ", Exception when fetching private key using key: " + encryptedDataStreamHandle.getKeyID(), ex); + throw new IllegalStateException( + "decryption exception: object: " + + objectName + + ", Exception when fetching private key using key: " + + encryptedDataStreamHandle.getKeyID(), + ex); } - } if (privateKey == null) - throw new IllegalStateException("decryption exception: object: " + objectName + ", Private key for message not found."); + throw new IllegalStateException( + "decryption exception: object: " + + objectName + + ", Private key for message not found."); - //finally, lets decrypt the object + // finally, lets decrypt the object InputStream decryptInputStream = encryptedDataStreamHandle.getDataStream(privateKey, "BC"); PGPObjectFactory decryptedDataReader = new PGPObjectFactory(decryptInputStream); - //the decrypted data object is compressed, lets decompress it. 
- PGPCompressedData compressedDataReader = (PGPCompressedData) decryptedDataReader.nextObject(); //get a handle to the decrypted, compress data stream - InputStream compressedStream = new BufferedInputStream(compressedDataReader.getDataStream()); + // the decrypted data object is compressed, lets decompress it. + // get a handle to the decrypted, compress data stream + PGPCompressedData compressedDataReader = + (PGPCompressedData) decryptedDataReader.nextObject(); + InputStream compressedStream = + new BufferedInputStream(compressedDataReader.getDataStream()); PGPObjectFactory compressedStreamReader = new PGPObjectFactory(compressedStream); Object data = compressedStreamReader.nextObject(); if (data instanceof PGPLiteralData) { PGPLiteralData dataPgpReader = (PGPLiteralData) data; - return dataPgpReader.getInputStream(); //a handle to the decrypted, uncompress data stream + // a handle to the decrypted, uncompress data stream + return dataPgpReader.getInputStream(); } else if (data instanceof PGPOnePassSignatureList) { - throw new PGPException("decryption exception: object: " + objectName + ", encrypted data contains a signed message - not literal data."); + throw new PGPException( + "decryption exception: object: " + + objectName + + ", encrypted data contains a signed message - not literal data."); } else { - throw new PGPException("decryption exception: object: " + objectName + ", data is not a simple encrypted file - type unknown."); + throw new PGPException( + "decryption exception: object: " + + objectName + + ", data is not a simple encrypted file - type unknown."); } - - } /* @@ -158,7 +183,9 @@ public InputStream decryptStream(InputStream in, char[] passwd, String objectNam * @param pass - pass phrase used to extract the PGP private key from the encrypted content. * @return PGP private key, null if not found. */ - private static PGPPrivateKey findSecretKey(PGPSecretKeyRingCollection securityCollection, long keyID, char[] pass) throws PGPException, NoSuchProviderException { + private static PGPPrivateKey findSecretKey( + PGPSecretKeyRingCollection securityCollection, long keyID, char[] pass) + throws PGPException, NoSuchProviderException { PGPSecretKey privateKey = securityCollection.getSecretKey(keyID); if (privateKey == null) { @@ -166,7 +193,6 @@ private static PGPPrivateKey findSecretKey(PGPSecretKeyRingCollection securityCo } return privateKey.extractPrivateKey(pass, "BC"); - } @Override @@ -180,10 +206,10 @@ public class ChunkEncryptorStream implements Iterator { private static final int MAX_CHUNK = 10 * 1024 * 1024; private boolean hasnext = true; - private InputStream is; - private InputStream encryptedSrc; - private ByteArrayOutputStream bos; - private BufferedOutputStream pgout; + private final InputStream is; + private final InputStream encryptedSrc; + private final ByteArrayOutputStream bos; + private final BufferedOutputStream pgout; public ChunkEncryptorStream(InputStream is, String fileName, PGPPublicKey pubKey) { this.is = is; @@ -196,7 +222,6 @@ public ChunkEncryptorStream(InputStream is, String fileName, PGPPublicKey pubKey @Override public boolean hasNext() { return this.hasnext; - } /* @@ -212,13 +237,16 @@ public byte[] next() { int count; while ((count = encryptedSrc.read(buffer, 0, buffer.length)) != -1) { pgout.write(buffer, 0, count); - if (bos.size() >= MAX_CHUNK) - return returnSafe(); + if (bos.size() >= MAX_CHUNK) return returnSafe(); } - return done(); //flush remaining data in buffer and close resources. 
+ // flush remaining data in buffer and close resources. + return done(); } catch (Exception e) { - throw new RuntimeException("Error encountered returning next chunk of ciphertext. Msg: " + e.getLocalizedMessage(), e); + throw new RuntimeException( + "Error encountered returning next chunk of ciphertext. Msg: " + + e.getLocalizedMessage(), + e); } } @@ -241,67 +269,80 @@ private byte[] returnSafe() { * flush remaining data in buffer and close resources. */ private byte[] done() throws IOException { - pgout.flush(); //flush whatever is in the buffer to the output stream + pgout.flush(); // flush whatever is in the buffer to the output stream - this.hasnext = false; //tell clients that there is no more data + this.hasnext = false; // tell clients that there is no more data byte[] returnData = this.bos.toByteArray(); - IOUtils.closeQuietly(pgout); //close the handle to the buffered output - IOUtils.closeQuietly(bos); //close the handle to the actual output + IOUtils.closeQuietly(pgout); // close the handle to the buffered output + IOUtils.closeQuietly(bos); // close the handle to the actual output return returnData; - } - } public class EncryptedInputStream extends InputStream { - private InputStream srcHandle; //handle to the source stream - private ByteArrayOutputStream bos = null; //Handle to encrypted stream - private int bosOff = 0; //current position within encrypted stream - private OutputStream pgpBosWrapper; //wrapper around the buffer which will contain the encrypted data. - private OutputStream encryptedOsWrapper; //handle to the encrypted data - private PGPCompressedDataGenerator compressedDataGenerator; //a means to compress data using PGP - private String fileName; //TODO: eliminate once debugging is completed. + private final InputStream srcHandle; // handle to the source stream + private ByteArrayOutputStream bos = null; // Handle to encrypted stream + private int bosOff = 0; // current position within encrypted stream + private OutputStream + pgpBosWrapper; // wrapper around the buffer which will contain the encrypted data. + private OutputStream encryptedOsWrapper; // handle to the encrypted data + private PGPCompressedDataGenerator + compressedDataGenerator; // a means to compress data using PGP + private String fileName; // TODO: eliminate once debugging is completed. public EncryptedInputStream(InputStream is, String fileName, PGPPublicKey pubKey) { this.srcHandle = is; this.bos = new ByteArrayOutputStream(); - //creates a cipher stream which will have an integrity packet associated with it - PGPEncryptedDataGenerator encryptedDataGenerator = new PGPEncryptedDataGenerator(PGPEncryptedData.CAST5, true, new SecureRandom(), "BC"); + // creates a cipher stream which will have an integrity packet associated with it + PGPEncryptedDataGenerator encryptedDataGenerator = + new PGPEncryptedDataGenerator( + PGPEncryptedData.CAST5, true, new SecureRandom(), "BC"); try { - encryptedDataGenerator.addMethod(pubKey); //Add a key encryption method to be used to encrypt the session data associated with this encrypted data - pgpBosWrapper = encryptedDataGenerator.open(bos, new byte[1 << 15]); //wrapper around the buffer which will contain the encrypted data. + // Add a key encryption method to be used to encrypt the session data associated + // with this encrypted data + encryptedDataGenerator.addMethod(pubKey); + // wrapper around the buffer which will contain the encrypted data. 
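+ // (new byte[1 << 15] is a 32 KiB work buffer, so the ciphertext is emitted as
+ // PGP partial packets chunked to roughly that size)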
+ pgpBosWrapper = encryptedDataGenerator.open(bos, new byte[1 << 15]); } catch (Exception e) { - throw new RuntimeException("Exception when wrapping PGP around our output stream", e); + throw new RuntimeException( + "Exception when wrapping PGP around our output stream", e); } - //a means to compress data using PGP - this.compressedDataGenerator = new PGPCompressedDataGenerator(PGPCompressedData.UNCOMPRESSED); + // a means to compress data using PGP + this.compressedDataGenerator = + new PGPCompressedDataGenerator(PGPCompressedData.UNCOMPRESSED); - /* + /* * Open a literal data packet, returning a stream to store the data inside the packet as an indefinite stream. - * A "literal data packet" in PGP world is the body of a message; data that is not to be further interpreted. - * - * The stream is written out as a series of partial packets with a chunk size determine by the size of the passed in buffer. - * @param outputstream - the stream we want the packet in - * @param format - the format we are using. - * @param filename - * @param the time of last modification we want stored. - * @param the buffer to use for collecting data to put into chunks. - */ + * A "literal data packet" in PGP world is the body of a message; data that is not to be further interpreted. + * + * The stream is written out as a series of partial packets with a chunk size determine by the size of the passed in buffer. + * @param outputstream - the stream we want the packet in + * @param format - the format we are using. + * @param filename + * @param the time of last modification we want stored. + * @param the buffer to use for collecting data to put into chunks. + */ try { PGPLiteralDataGenerator literalDataGenerator = new PGPLiteralDataGenerator(); - this.encryptedOsWrapper = literalDataGenerator.open(compressedDataGenerator.open(pgpBosWrapper), PGPLiteralData.BINARY, fileName, new Date(), new byte[1 << 15]); + this.encryptedOsWrapper = + literalDataGenerator.open( + compressedDataGenerator.open(pgpBosWrapper), + PGPLiteralData.BINARY, + fileName, + new Date(), + new byte[1 << 15]); } catch (Exception e) { - throw new RuntimeException("Exception when creating the PGP encrypted wrapper around the output stream.", e); + throw new RuntimeException( + "Exception when creating the PGP encrypted wrapper around the output stream.", + e); } - this.fileName = fileName; //TODO: eliminate once debugging is completed. - - + this.fileName = fileName; // TODO: eliminate once debugging is completed. } /* @@ -315,50 +356,45 @@ public EncryptedInputStream(InputStream is, String fileName, PGPPublicKey pubKey @Override public synchronized int read(byte b[], int off, int len) throws IOException { if (this.bosOff < this.bos.size()) { - //if here, you still have data in the encrypted stream, lets give it to the client + // if here, you still have data in the encrypted stream, lets give it to the client return copyToBuff(b, off, len); } - //If here, it's time to read the next chunk from the input and do the encryption. + // If here, it's time to read the next chunk from the input and do the encryption. this.bos.reset(); this.bosOff = 0; - //== read up to "len" or end of file from input stream and encrypt it. + // == read up to "len" or end of file from input stream and encrypt it. 
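+ // (each srcHandle.read() returns at most "len" plaintext bytes; the loop below
+ // keeps encrypting until bos holds at least "len" ciphertext bytes or the
+ // source is exhausted)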
byte[] buff = new byte[1 << 16]; - int bytesRead = 0; //num of bytes read from the source input stream - - while (this.bos.size() < len && (bytesRead = this.srcHandle.read(buff, 0, len)) > 0) { //lets process each chunk from input until we fill our output stream or we reach end of input - - /* TODO: msg was only for debug purposes - * - logger.info("Reading input file: " + this.fileName + ", number of bytes read from input stream: " + bytesRead - + ", size of buffer: " - + buff.length - ); - - */ + int bytesRead = 0; // num of bytes read from the source input stream + while (this.bos.size() < len && (bytesRead = this.srcHandle.read(buff, 0, len)) > 0) { + // lets process each chunk from input until we fill our output + // stream or we reach end of input this.encryptedOsWrapper.write(buff, 0, bytesRead); } - if (bytesRead < 0) { //we have read everything from the source input, lets perform cleanup on any resources. + if (bytesRead < 0) { + // we have read everything from the source input, lets perform cleanup on + // any resources. this.encryptedOsWrapper.close(); this.compressedDataGenerator.close(); this.pgpBosWrapper.close(); } if (bytesRead < 0 && this.bos.size() == 0) { - //if here, read all the bytes from the input and there is nothing in the encrypted stream. + // if here, read all the bytes from the input and there is nothing in the encrypted + // stream. return bytesRead; } - - /* - * If here, one of the following occurred: - * 1. you read data from the input and encrypted it. - * 2. there was no more data in the input but you still had some data in the encrypted stream. - * - */ + + /* + * If here, one of the following occurred: + * 1. you read data from the input and encrypted it. + * 2. there was no more data in the input but you still had some data in the encrypted stream. + * + */ return copyToBuff(b, off, len); } @@ -372,14 +408,17 @@ public synchronized int read(byte b[], int off, int len) throws IOException { * @return number of bytes copied from the encrypted stream to the output buffer */ private int copyToBuff(byte[] buff, int off, int len) { - /* - * num of bytes to copy within encrypted stream = (current size of bytes within encrypted stream - current position within encrypted stream) < size of output buffer, - * then copy what is in the encrypted stream; otherwise, copy up to the max size of the output buffer. - */ - int wlen = (this.bos.size() - this.bosOff) < len ? (this.bos.size() - this.bosOff) : len; - System.arraycopy(this.bos.toByteArray(), this.bosOff, buff, off, wlen); //copy data within encrypted stream to the output buffer - - this.bosOff = this.bosOff + wlen; //now update the current position within the encrypted stream + /* + * num of bytes to copy within encrypted stream = (current size of bytes within encrypted stream - current position within encrypted stream) < size of output buffer, + * then copy what is in the encrypted stream; otherwise, copy up to the max size of the output buffer. + */ + int wlen = + (this.bos.size() - this.bosOff) < len ? 
(this.bos.size() - this.bosOff) : len; + // copy data within encrypted stream to the output buffer + System.arraycopy(this.bos.toByteArray(), this.bosOff, buff, off, wlen); + + // now update the current position within the encrypted stream + this.bosOff = this.bosOff + wlen; return wlen; } @@ -392,8 +431,8 @@ public void close() throws IOException { @Override public int read() throws IOException { - throw new UnsupportedOperationException("Not supported, invoke read(byte[] bytes, int off, int len) instead."); + throw new UnsupportedOperationException( + "Not supported, invoke read(byte[] bytes, int off, int len) instead."); } - } -} \ No newline at end of file +} diff --git a/priam/src/main/java/com/netflix/priam/cryptography/pgp/PgpUtil.java b/priam/src/main/java/com/netflix/priam/cryptography/pgp/PgpUtil.java index 05e5b9740..651c7c81c 100755 --- a/priam/src/main/java/com/netflix/priam/cryptography/pgp/PgpUtil.java +++ b/priam/src/main/java/com/netflix/priam/cryptography/pgp/PgpUtil.java @@ -1,44 +1,42 @@ /** * Copyright 2017 Netflix, Inc. - *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
+ *
+ * <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * <p>http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * <p>
Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.priam.cryptography.pgp; -import org.bouncycastle.openpgp.*; - import java.io.BufferedInputStream; import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; import java.security.NoSuchProviderException; import java.util.Iterator; - +import org.bouncycastle.openpgp.*; public class PgpUtil { /** - * Search a secret key ring collection for a secret key corresponding to keyID if it - * exists. + * Search a secret key ring collection for a secret key corresponding to keyID if it exists. * * @param pgpSec a secret key ring collection. * @param keyID keyID we want. * @param pass passphrase to decrypt secret key with. - * @return - * @throws PGPException - * @throws NoSuchProviderException + * @return secret or private key corresponding to the keyID. + * @throws PGPException if there is any exception in getting the PGP key corresponding to the ID + * provided. + * @throws NoSuchProviderException If PGP Provider is not available. */ - public static PGPPrivateKey findSecretKey(PGPSecretKeyRingCollection pgpSec, long keyID, char[] pass) throws PGPException, NoSuchProviderException { + public static PGPPrivateKey findSecretKey( + PGPSecretKeyRingCollection pgpSec, long keyID, char[] pass) + throws PGPException, NoSuchProviderException { PGPSecretKey pgpSecKey = pgpSec.getSecretKey(keyID); @@ -57,21 +55,23 @@ public static PGPPublicKey readPublicKey(String fileName) throws IOException, PG } /** - * A simple routine that opens a key ring file and loads the first available key - * suitable for encryption. + * A simple routine that opens a key ring file and loads the first available key suitable for + * encryption. * - * @param input - * @return - * @throws IOException - * @throws PGPException + * @param input inputstream to the pgp file key ring. + * @return PGP key from the key ring. + * @throws IOException If any error in reading from the input stream. + * @throws PGPException if there is any error in getting key from key ring. */ @SuppressWarnings("rawtypes") public static PGPPublicKey readPublicKey(InputStream input) throws IOException, PGPException { - PGPPublicKeyRingCollection pgpPub = new PGPPublicKeyRingCollection(PGPUtil.getDecoderStream(input)); + PGPPublicKeyRingCollection pgpPub = + new PGPPublicKeyRingCollection(PGPUtil.getDecoderStream(input)); // - // we just loop through the collection till we find a key suitable for encryption, in the real + // we just loop through the collection till we find a key suitable for encryption, in the + // real // world you would probably want to be a bit smarter about this. // @@ -91,6 +91,4 @@ public static PGPPublicKey readPublicKey(InputStream input) throws IOException, throw new IllegalArgumentException("Can't find encryption key in key ring."); } - - -} \ No newline at end of file +} diff --git a/priam/src/main/java/com/netflix/priam/defaultimpl/CassandraOperations.java b/priam/src/main/java/com/netflix/priam/defaultimpl/CassandraOperations.java deleted file mode 100644 index 5b2eebf22..000000000 --- a/priam/src/main/java/com/netflix/priam/defaultimpl/CassandraOperations.java +++ /dev/null @@ -1,137 +0,0 @@ -/** - * Copyright 2018 Netflix, Inc. - *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.netflix.priam.defaultimpl; - -import com.netflix.priam.config.IConfiguration; -import com.netflix.priam.utils.JMXNodeTool; -import com.netflix.priam.utils.RetryableCallable; -import org.apache.cassandra.db.ColumnFamilyStoreMBean; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.inject.Inject; -import java.util.*; - -/** - * This class encapsulates interactions with Cassandra. - * Created by aagrawal on 6/19/18. - */ -public class CassandraOperations { - private static final Logger logger = LoggerFactory.getLogger(CassandraOperations.class); - private IConfiguration configuration; - - @Inject - CassandraOperations(IConfiguration configuration) - { - this.configuration = configuration; - } - - /** - * This method neds to be synchronized. Context: During the transition phase to backup version 2.0, we might be executing - * multiple snapshots at the same time. To avoid, unknown behavior by Cassanddra, it is wise to keep this method sync. - * Also, with backups being on CRON, we don't know how often operator is taking snapshot. - * @param snapshotName Name of the snapshot on disk. This snapshotName should be UNIQUE among all the snapshots. - * Try to append UUID to snapshotName to ensure uniqueness. - * This is to ensure a) Snapshot fails if name are not unique. - * b) You might take snapshots which are not "part" of same snapshot. e.g. Any leftovers from previous operation. - * c) Once snapshot fails, this will clean the failed snapshot. - * @throws Exception in case of error while taking a snapshot by Cassandra. - */ - public synchronized void takeSnapshot(final String snapshotName) throws Exception { - //Retry max of 6 times with 10 second in between (for one minute). This is to ensure that we overcome any temporary glitch. - //Note that operation MAY fail if cassandra successfully took the snapshot of certain columnfamily(ies) and we try to create snapshot with - //same name. It is a good practice to call clearSnapshot after this operation fails, to ensure we don't leave - //any left overs. - //Example scenario: Change of file permissions by manual intervention and C* unable to take snapshot of one CF. - try { - new RetryableCallable(6, 10000) { - public Void retriableCall() throws Exception { - JMXNodeTool nodetool = JMXNodeTool.instance(configuration); - nodetool.takeSnapshot(snapshotName, null); - return null; - } - }.call(); - }catch (Exception e){ - logger.error("Error while taking snapshot {}. Asking Cassandra to clear snapshot to avoid accumulation of snapshots.", snapshotName); - clearSnapshot(snapshotName); - throw e; - } - } - - /** - * Clear the snapshot tag from disk. - * @param snapshotTag Name of the snapshot to be removed. - * @throws Exception in case of error while clearing a snapshot. 
- */ - public void clearSnapshot(final String snapshotTag) throws Exception { - new RetryableCallable() { - public Void retriableCall() throws Exception { - JMXNodeTool nodetool = JMXNodeTool.instance(configuration); - nodetool.clearSnapshot(snapshotTag); - return null; - } - }.call(); - } - - - public List getKeyspaces() throws Exception{ - return new RetryableCallable>(){ - public List retriableCall() throws Exception{ - try(JMXNodeTool nodeTool = JMXNodeTool.instance(configuration)) { - return nodeTool.getKeyspaces(); - } - } - }.call(); - } - - public Map> getColumnfamilies() throws Exception{ - return new RetryableCallable>>(){ - public Map> retriableCall() throws Exception{ - try(JMXNodeTool nodeTool = JMXNodeTool.instance(configuration)) { - final Map> columnfamilies = new HashMap<>(); - Iterator> columnfamilyStoreMBean = nodeTool.getColumnFamilyStoreMBeanProxies(); - columnfamilyStoreMBean.forEachRemaining(entry -> { - columnfamilies.putIfAbsent(entry.getKey(), new ArrayList<>()); - columnfamilies.get(entry.getKey()).add(entry.getValue().getColumnFamilyName()); - }); - return columnfamilies; - } - } - }.call(); - } - - public void forceKeyspaceCompaction(String keyspaceName, String... columnfamilies) throws Exception{ - new RetryableCallable(){ - public Void retriableCall() throws Exception{ - try(JMXNodeTool nodeTool = JMXNodeTool.instance(configuration)) { - nodeTool.forceKeyspaceCompaction(false, keyspaceName, columnfamilies); - return null; - } - } - }.call(); - } - - public void forceKeyspaceFlush(String keyspaceName) throws Exception{ - new RetryableCallable(){ - public Void retriableCall() throws Exception{ - try(JMXNodeTool nodeTool = JMXNodeTool.instance(configuration)) { - nodeTool.forceKeyspaceFlush(keyspaceName, new String[0]); - return null; - } - } - }.call(); - } -} diff --git a/priam/src/main/java/com/netflix/priam/defaultimpl/CassandraProcessManager.java b/priam/src/main/java/com/netflix/priam/defaultimpl/CassandraProcessManager.java index 1e647bdae..3ef8d06a6 100644 --- a/priam/src/main/java/com/netflix/priam/defaultimpl/CassandraProcessManager.java +++ b/priam/src/main/java/com/netflix/priam/defaultimpl/CassandraProcessManager.java @@ -1,16 +1,14 @@ /** * Copyright 2017 Netflix, Inc. - *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
+ *
+ * <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * <p>http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * <p>
Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.priam.defaultimpl; @@ -18,13 +16,9 @@ import com.google.common.collect.Lists; import com.google.inject.Inject; import com.netflix.priam.config.IConfiguration; +import com.netflix.priam.connection.JMXNodeTool; import com.netflix.priam.health.InstanceState; import com.netflix.priam.merics.CassMonitorMetrics; -import com.netflix.priam.utils.JMXNodeTool; -import org.apache.commons.lang3.StringUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.io.ByteArrayOutputStream; import java.io.File; import java.io.IOException; @@ -33,17 +27,23 @@ import java.util.List; import java.util.Map; import java.util.concurrent.*; +import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class CassandraProcessManager implements ICassandraProcess { private static final Logger logger = LoggerFactory.getLogger(CassandraProcessManager.class); private static final String SUDO_STRING = "/usr/bin/sudo"; private static final int SCRIPT_EXECUTE_WAIT_TIME_MS = 5000; protected final IConfiguration config; - private InstanceState instanceState; - private CassMonitorMetrics cassMonitorMetrics; + private final InstanceState instanceState; + private final CassMonitorMetrics cassMonitorMetrics; @Inject - public CassandraProcessManager(IConfiguration config, InstanceState instanceState, CassMonitorMetrics cassMonitorMetrics) { + public CassandraProcessManager( + IConfiguration config, + InstanceState instanceState, + CassMonitorMetrics cassMonitorMetrics) { this.config = config; this.instanceState = instanceState; this.cassMonitorMetrics = cassMonitorMetrics; @@ -58,6 +58,7 @@ protected void setEnv(Map env) { env.put("LOCAL_JMX", config.enableRemoteJMX() ? "no" : "yes"); env.put("MAX_DIRECT_MEMORY", config.getMaxDirectMemory()); env.put("CASS_LOGS_DIR", config.getLogDirLocation()); + env.put("CASSANDRA_LOG_DIR", config.getLogDirLocation()); env.put("CASSANDRA_HOME", config.getCassHome()); } @@ -67,14 +68,13 @@ public void start(boolean join_ring) throws IOException { instanceState.setShouldCassandraBeAlive(true); List command = Lists.newArrayList(); - if(config.useSudo()) { + if (config.useSudo()) { logger.info("Configured to use sudo to start C*"); if (!"root".equals(System.getProperty("user.name"))) { command.add(SUDO_STRING); command.add("-n"); command.add("-E"); } - } command.addAll(getStartCommand()); @@ -93,14 +93,12 @@ public void start(boolean join_ring) throws IOException { logger.info("Starting cassandra server ...."); try { - int code = starter.waitFor(); + int code = starter.waitFor(); if (code == 0) { logger.info("Cassandra server has been started"); instanceState.setCassandraProcessAlive(true); this.cassMonitorMetrics.incCassStart(); - } - else - logger.error("Unable to start cassandra server. Error code: {}", code); + } else logger.error("Unable to start cassandra server. 
Error code: {}", code); logProcessOutput(starter); } catch (Exception e) { @@ -109,10 +107,9 @@ public void start(boolean join_ring) throws IOException { } protected List getStartCommand() { - List startCmd = new LinkedList(); + List startCmd = new LinkedList<>(); for (String param : config.getCassStartupScript().split(" ")) { - if (StringUtils.isNotBlank(param)) - startCmd.add(param); + if (StringUtils.isNotBlank(param)) startCmd.add(param); } return startCmd; } @@ -132,16 +129,14 @@ String readProcessStream(InputStream inputStream) throws IOException { final byte[] buffer = new byte[512]; final ByteArrayOutputStream baos = new ByteArrayOutputStream(buffer.length); int cnt; - while ((cnt = inputStream.read(buffer)) != -1) - baos.write(buffer, 0, cnt); + while ((cnt = inputStream.read(buffer)) != -1) baos.write(buffer, 0, cnt); return baos.toString(); } - public void stop(boolean force) throws IOException { logger.info("Stopping cassandra server ...."); List command = Lists.newArrayList(); - if(config.useSudo()) { + if (config.useSudo()) { logger.info("Configured to use sudo to stop C*"); if (!"root".equals(System.getProperty("user.name"))) { @@ -151,8 +146,7 @@ public void stop(boolean force) throws IOException { } } for (String param : config.getCassStopScript().split(" ")) { - if (StringUtils.isNotBlank(param)) - command.add(param); + if (StringUtils.isNotBlank(param)) command.add(param); } ProcessBuilder stopCass = new ProcessBuilder(command); stopCass.directory(new File("/")); @@ -161,31 +155,42 @@ public void stop(boolean force) throws IOException { instanceState.setShouldCassandraBeAlive(false); if (!force && config.getGracefulDrainHealthWaitSeconds() >= 0) { ExecutorService executor = Executors.newSingleThreadExecutor(); - Future drainFuture = executor.submit(() -> { - // As the node has been marked as shutting down above in setShouldCassandraBeAlive, we wait this - // duration to allow external healthcheck systems time to pick up the state change. - try { - Thread.sleep(config.getGracefulDrainHealthWaitSeconds() * 1000); - } catch (InterruptedException e) { - return; - } - - try { - JMXNodeTool nodetool = JMXNodeTool.instance(config); - nodetool.drain(); - } catch (InterruptedException | IOException | ExecutionException e) { - logger.error("Exception draining Cassandra, could not drain. Proceeding with shutdown.", e); - } - // Once Cassandra is drained the thrift/native servers are shutdown and there is no need to wait to - // stop Cassandra. Just stop it now. - }); - - // In case drain hangs, timeout the future and continue stopping anyways. Give drain 30s always - // In production we freqently see servers that do not want to drain + Future drainFuture = + executor.submit( + () -> { + // As the node has been marked as shutting down above in + // setShouldCassandraBeAlive, we wait this + // duration to allow external healthcheck systems time to pick up + // the state change. + try { + Thread.sleep(config.getGracefulDrainHealthWaitSeconds() * 1000); + } catch (InterruptedException e) { + return; + } + + try { + JMXNodeTool nodetool = JMXNodeTool.instance(config); + nodetool.drain(); + } catch (InterruptedException + | IOException + | ExecutionException e) { + logger.error( + "Exception draining Cassandra, could not drain. Proceeding with shutdown.", + e); + } + // Once Cassandra is drained the thrift/native servers are shutdown + // and there is no need to wait to + // stop Cassandra. Just stop it now. 
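+ // (the drain task ends here; drainFuture.get(...) below bounds the whole
+ // attempt with a timeout so a hung drain cannot block shutdown)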
+ }); + + // In case drain hangs, timeout the future and continue stopping anyways. Give drain 30s + // always + // In production we frequently see servers that do not want to drain try { drainFuture.get(config.getGracefulDrainHealthWaitSeconds() + 30, TimeUnit.SECONDS); } catch (ExecutionException | TimeoutException | InterruptedException e) { - logger.error("Waited 30s for drain but it did not complete, continuing to shutdown", e); + logger.error( + "Waited 30s for drain but it did not complete, continuing to shutdown", e); } } Process stopper = stopCass.start(); @@ -195,8 +200,7 @@ public void stop(boolean force) throws IOException { logger.info("Cassandra server has been stopped"); this.cassMonitorMetrics.incCassStop(); instanceState.setCassandraProcessAlive(false); - } - else { + } else { logger.error("Unable to stop cassandra server. Error code: {}", code); logProcessOutput(stopper); } @@ -204,4 +208,4 @@ public void stop(boolean force) throws IOException { logger.warn("couldn't shut down cassandra correctly", e); } } -} \ No newline at end of file +} diff --git a/priam/src/main/java/com/netflix/priam/defaultimpl/ICassandraProcess.java b/priam/src/main/java/com/netflix/priam/defaultimpl/ICassandraProcess.java index fd33b86d7..8f0599649 100644 --- a/priam/src/main/java/com/netflix/priam/defaultimpl/ICassandraProcess.java +++ b/priam/src/main/java/com/netflix/priam/defaultimpl/ICassandraProcess.java @@ -17,8 +17,6 @@ package com.netflix.priam.defaultimpl; import com.google.inject.ImplementedBy; -import com.netflix.priam.defaultimpl.CassandraProcessManager; - import java.io.IOException; /** diff --git a/priam/src/main/java/com/netflix/priam/defaultimpl/IService.java b/priam/src/main/java/com/netflix/priam/defaultimpl/IService.java new file mode 100644 index 000000000..dddf8e10f --- /dev/null +++ b/priam/src/main/java/com/netflix/priam/defaultimpl/IService.java @@ -0,0 +1,85 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.priam.defaultimpl; + +import com.netflix.priam.scheduler.PriamScheduler; +import com.netflix.priam.scheduler.Task; +import com.netflix.priam.scheduler.TaskTimer; +import java.text.ParseException; +import org.quartz.SchedulerException; + +/** + * This is how we create a new service in Priam. Any service we start, should implement this + * interface so we can update the service at run-time if required. + * + *
+ * <p>Created by aagrawal on 3/9/19.
+ */
+public interface IService {
+    /**
+     * This method is called to schedule the service when we initialize it for the first time ONLY.
+     *
+     * @throws Exception if there is any error while trying to schedule the service.
+     */
+    void scheduleService() throws Exception;
+
+    /**
+     * This method is called before we try to update the service. Use this method to do any kind of
+     * maintenance operations before we change the scheduling of all the jobs in service.
+     *
+     * @throws Exception if there is any error in pre-hook method of service.
+     */
+    void updateServicePre() throws Exception;
+
+    /**
+     * This method is called after we re-schedule all the services in PriamScheduler. Use this
+     * method for post-hook maintenance operations after changing the schedule of all the jobs.
+     *
+     * @throws Exception if there is any error in post-hook method of service.
+     */
+    void updateServicePost() throws Exception;
+
+    /**
+     * Schedule a given task. It will safely delete that task from the scheduler before scheduling.
+     *
+     * @param priamScheduler Scheduler in use by Priam.
+     * @param task Task that needs to be scheduled in priamScheduler
+     * @param taskTimer Timer for the task
+     * @throws SchedulerException If there is any error in deleting the task or scheduling a new
+     *     job.
+     * @throws ParseException If there is any error in parsing the taskTimer while trying to add a
+     *     new job to the scheduler.
+     */
+    default void scheduleTask(
+            PriamScheduler priamScheduler, Class<? extends Task> task, TaskTimer taskTimer)
+            throws SchedulerException, ParseException {
+        priamScheduler.deleteTask(task.getName());
+        if (taskTimer == null) return;
+        priamScheduler.addTask(task.getName(), task, taskTimer);
+    }
+
+    /**
+     * Update the service. This method will be called to update the service while Priam is running.
+     *
+     * @throws Exception if any issue while updating the service.
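+     *     <p>For illustration only, a minimal conforming implementation (MyService and MyTask
+     *     are hypothetical names; MyTask is assumed to extend Task):
+     *     <pre>
+     *     class MyService implements IService {
+     *         private final PriamScheduler scheduler;
+     *         private final TaskTimer timer;
+     *
+     *         MyService(PriamScheduler scheduler, TaskTimer timer) {
+     *             this.scheduler = scheduler;
+     *             this.timer = timer;
+     *         }
+     *
+     *         public void scheduleService() throws Exception {
+     *             // safe to re-run: scheduleTask deletes the task before re-adding it
+     *             scheduleTask(scheduler, MyTask.class, timer);
+     *         }
+     *
+     *         public void updateServicePre() {}
+     *
+     *         public void updateServicePost() {}
+     *     }
+     *     </pre>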
+ */ + default void onChangeUpdateService() throws Exception { + updateServicePre(); + scheduleService(); + updateServicePost(); + } +} diff --git a/priam/src/main/java/com/netflix/priam/defaultimpl/InjectedWebListener.java b/priam/src/main/java/com/netflix/priam/defaultimpl/InjectedWebListener.java index 8ebbe7303..1d7a1db57 100644 --- a/priam/src/main/java/com/netflix/priam/defaultimpl/InjectedWebListener.java +++ b/priam/src/main/java/com/netflix/priam/defaultimpl/InjectedWebListener.java @@ -22,25 +22,25 @@ import com.google.inject.Module; import com.google.inject.servlet.GuiceServletContextListener; import com.google.inject.servlet.ServletModule; -import com.netflix.priam.config.IConfiguration; import com.netflix.priam.PriamServer; +import com.netflix.priam.config.IConfiguration; import com.sun.jersey.api.core.PackagesResourceConfig; import com.sun.jersey.guice.spi.container.servlet.GuiceContainer; import com.sun.jersey.spi.container.servlet.ServletContainer; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import javax.servlet.ServletContextEvent; import org.quartz.Scheduler; import org.quartz.SchedulerException; import org.quartz.SchedulerFactory; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import javax.servlet.ServletContextEvent; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - public class InjectedWebListener extends GuiceServletContextListener { protected static final Logger logger = LoggerFactory.getLogger(InjectedWebListener.class); private Injector injector; + @Override protected Injector getInjector() { List moduleList = Lists.newArrayList(); @@ -48,8 +48,8 @@ protected Injector getInjector() { moduleList.add(new PriamGuiceModule()); injector = Guice.createInjector(moduleList); try { - injector.getInstance(IConfiguration.class).intialize(); - injector.getInstance(PriamServer.class).initialize(); + injector.getInstance(IConfiguration.class).initialize(); + injector.getInstance(PriamServer.class).scheduleService(); } catch (Exception e) { logger.error(e.getMessage(), e); throw new RuntimeException(e.getMessage(), e); @@ -59,14 +59,12 @@ protected Injector getInjector() { @Override public void contextDestroyed(ServletContextEvent servletContextEvent) { - try - { - for (Scheduler scheduler : injector.getInstance(SchedulerFactory.class).getAllSchedulers()){ + try { + for (Scheduler scheduler : + injector.getInstance(SchedulerFactory.class).getAllSchedulers()) { scheduler.shutdown(); } - } - catch (SchedulerException e) - { + } catch (SchedulerException e) { throw new RuntimeException(e); } super.contextDestroyed(servletContextEvent); @@ -75,12 +73,11 @@ public void contextDestroyed(ServletContextEvent servletContextEvent) { public static class JaxServletModule extends ServletModule { @Override protected void configureServlets() { - Map params = new HashMap(); + Map params = new HashMap<>(); params.put(PackagesResourceConfig.PROPERTY_PACKAGES, "unbound"); params.put("com.sun.jersey.config.property.packages", "com.netflix.priam.resources"); params.put(ServletContainer.PROPERTY_FILTER_CONTEXT_PATH, "/REST"); serve("/REST/*").with(GuiceContainer.class, params); } } - -} \ No newline at end of file +} diff --git a/priam/src/main/java/com/netflix/priam/defaultimpl/PriamGuiceModule.java b/priam/src/main/java/com/netflix/priam/defaultimpl/PriamGuiceModule.java index a6fc032f0..0f68962ac 100644 --- a/priam/src/main/java/com/netflix/priam/defaultimpl/PriamGuiceModule.java +++ 
b/priam/src/main/java/com/netflix/priam/defaultimpl/PriamGuiceModule.java @@ -18,8 +18,6 @@ import com.google.inject.AbstractModule; import com.google.inject.name.Names; -import com.netflix.priam.cred.ICredential; -import com.netflix.priam.cred.ICredentialGeneric; import com.netflix.priam.aws.S3CrossAccountFileSystem; import com.netflix.priam.aws.S3EncryptedFileSystem; import com.netflix.priam.aws.S3FileSystem; @@ -27,8 +25,11 @@ import com.netflix.priam.aws.auth.IS3Credential; import com.netflix.priam.aws.auth.S3RoleAssumptionCredential; import com.netflix.priam.backup.IBackupFileSystem; -import com.netflix.priam.backup.parallel.CassandraBackupQueueMgr; -import com.netflix.priam.backup.parallel.ITaskQueueMgr; +import com.netflix.priam.backupv2.IMetaProxy; +import com.netflix.priam.backupv2.MetaV1Proxy; +import com.netflix.priam.backupv2.MetaV2Proxy; +import com.netflix.priam.cred.ICredential; +import com.netflix.priam.cred.ICredentialGeneric; import com.netflix.priam.cryptography.IFileCryptography; import com.netflix.priam.cryptography.pgp.PgpCredential; import com.netflix.priam.cryptography.pgp.PgpCryptography; @@ -39,26 +40,38 @@ import org.quartz.SchedulerFactory; import org.quartz.impl.StdSchedulerFactory; - public class PriamGuiceModule extends AbstractModule { @Override protected void configure() { bind(SchedulerFactory.class).to(StdSchedulerFactory.class).asEagerSingleton(); bind(IBackupFileSystem.class).annotatedWith(Names.named("backup")).to(S3FileSystem.class); - bind(IBackupFileSystem.class).annotatedWith(Names.named("encryptedbackup")).to(S3EncryptedFileSystem.class); - bind(IBackupFileSystem.class).annotatedWith(Names.named("incr_restore")).to(S3FileSystem.class); - bind(IBackupFileSystem.class).annotatedWith(Names.named("backup_status")).to(S3FileSystem.class); + bind(IBackupFileSystem.class) + .annotatedWith(Names.named("encryptedbackup")) + .to(S3EncryptedFileSystem.class); bind(S3CrossAccountFileSystem.class); - bind(IBackupFileSystem.class).annotatedWith(Names.named("gcsencryptedbackup")).to(GoogleEncryptedFileSystem.class); - bind(IS3Credential.class).annotatedWith(Names.named("awss3roleassumption")).to(S3RoleAssumptionCredential.class); - bind(ICredential.class).annotatedWith(Names.named("awsec2roleassumption")).to(EC2RoleAssumptionCredential.class); - bind(IFileCryptography.class).annotatedWith(Names.named("filecryptoalgorithm")).to(PgpCryptography.class); - bind(ICredentialGeneric.class).annotatedWith(Names.named("gcscredential")).to(GcsCredential.class); - bind(ICredentialGeneric.class).annotatedWith(Names.named("pgpcredential")).to(PgpCredential.class); - bind(ITaskQueueMgr.class).annotatedWith(Names.named("backup")).to(CassandraBackupQueueMgr.class); + bind(IBackupFileSystem.class) + .annotatedWith(Names.named("gcsencryptedbackup")) + .to(GoogleEncryptedFileSystem.class); + bind(IS3Credential.class) + .annotatedWith(Names.named("awss3roleassumption")) + .to(S3RoleAssumptionCredential.class); + bind(ICredential.class) + .annotatedWith(Names.named("awsec2roleassumption")) + .to(EC2RoleAssumptionCredential.class); + bind(IFileCryptography.class) + .annotatedWith(Names.named("filecryptoalgorithm")) + .to(PgpCryptography.class); + bind(ICredentialGeneric.class) + .annotatedWith(Names.named("gcscredential")) + .to(GcsCredential.class); + bind(ICredentialGeneric.class) + .annotatedWith(Names.named("pgpcredential")) + .to(PgpCredential.class); + bind(IMetaProxy.class).annotatedWith(Names.named("v1")).to(MetaV1Proxy.class); + 
bind(IMetaProxy.class).annotatedWith(Names.named("v2")).to(MetaV2Proxy.class);
         bind(Registry.class).toInstance(new NoopRegistry());
     }
 }
diff --git a/priam/src/main/java/com/netflix/priam/google/GcsCredential.java b/priam/src/main/java/com/netflix/priam/google/GcsCredential.java
index cfb3f2b79..21cfa4023 100755
--- a/priam/src/main/java/com/netflix/priam/google/GcsCredential.java
+++ b/priam/src/main/java/com/netflix/priam/google/GcsCredential.java
@@ -1,16 +1,14 @@
 /**
  * Copyright 2017 Netflix, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
+ *
+ * <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * <p>http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * <p>
Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.priam.google; @@ -22,11 +20,11 @@ /* * A generic implementation of fetch keys as plaintext. The key values are used with Google Cloud Storage. Users may - * want to provide an implementation where your key(s)' value is decrypted using AES encryption algorithm. + * want to provide an implementation where your key(s)' value is decrypted using AES encryption algorithm. */ public class GcsCredential implements ICredentialGeneric { - private IConfiguration config; + private final IConfiguration config; @Inject public GcsCredential(IConfiguration config) { @@ -53,5 +51,4 @@ public byte[] getValue(KEY key) { throw new IllegalArgumentException("Key value not supported."); } } - } diff --git a/priam/src/main/java/com/netflix/priam/google/GoogleEncryptedFileSystem.java b/priam/src/main/java/com/netflix/priam/google/GoogleEncryptedFileSystem.java index 9ce1957c3..f008e5726 100755 --- a/priam/src/main/java/com/netflix/priam/google/GoogleEncryptedFileSystem.java +++ b/priam/src/main/java/com/netflix/priam/google/GoogleEncryptedFileSystem.java @@ -1,16 +1,14 @@ /** * Copyright 2017 Netflix, Inc. - *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
+ *
+ * <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * <p>http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * <p>
Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.priam.google; @@ -26,30 +24,26 @@ import com.google.inject.Inject; import com.google.inject.Provider; import com.google.inject.name.Named; +import com.netflix.priam.backup.AbstractBackupPath; +import com.netflix.priam.backup.AbstractFileSystem; +import com.netflix.priam.backup.BackupRestoreException; import com.netflix.priam.config.IConfiguration; import com.netflix.priam.cred.ICredentialGeneric; import com.netflix.priam.cred.ICredentialGeneric.KEY; -import com.netflix.priam.aws.S3BackupPath; -import com.netflix.priam.backup.AbstractBackupPath; -import com.netflix.priam.backup.BackupRestoreException; -import com.netflix.priam.backup.IBackupFileSystem; -import org.apache.commons.io.IOUtils; -import org.apache.commons.lang3.StringUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.management.MBeanServer; -import javax.management.ObjectName; +import com.netflix.priam.merics.BackupMetrics; +import com.netflix.priam.notification.BackupNotificationMgr; import java.io.*; -import java.lang.management.ManagementFactory; +import java.nio.file.Path; +import java.time.Instant; import java.util.ArrayList; import java.util.Collection; -import java.util.Date; import java.util.Iterator; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; +import java.util.List; +import org.apache.commons.io.IOUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -public class GoogleEncryptedFileSystem implements IBackupFileSystem, GoogleEncryptedFileSystemMBean { +public class GoogleEncryptedFileSystem extends AbstractFileSystem { private static final Logger logger = LoggerFactory.getLogger(GoogleEncryptedFileSystem.class); @@ -57,53 +51,36 @@ public class GoogleEncryptedFileSystem implements IBackupFileSystem, GoogleEncry private static final JsonFactory JSON_FACTORY = JacksonFactory.getDefaultInstance(); private HttpTransport httpTransport; - private Credential credential; //represents our "service account" credentials we will use to access GCS + // represents our "service account" credentials we will use to access GCS + private Credential credential; private Storage gcsStorageHandle; private Storage.Objects objectsResoruceHandle = null; - - private Provider pathProvider; private String srcBucketName; - private IConfiguration config; - private AtomicInteger downloadCount = new AtomicInteger(); - protected AtomicLong bytesDownloaded = new AtomicLong(); + private final IConfiguration config; - private ICredentialGeneric gcsCredential; + private final ICredentialGeneric gcsCredential; + private final BackupMetrics backupMetrics; @Inject - public GoogleEncryptedFileSystem(Provider pathProvider, final IConfiguration config - , @Named("gcscredential") ICredentialGeneric credential) { - - this.pathProvider = pathProvider; + public GoogleEncryptedFileSystem( + Provider pathProvider, + final IConfiguration config, + @Named("gcscredential") ICredentialGeneric credential, + BackupMetrics backupMetrics, + BackupNotificationMgr backupNotificationManager) { + super(config, backupMetrics, backupNotificationManager, pathProvider); + this.backupMetrics = backupMetrics; this.config = config; this.gcsCredential = 
credential; try { - this.httpTransport = GoogleNetHttpTransport.newTrustedTransport(); - - } catch (Exception e) { - throw new IllegalStateException("Unable to create a handle to the Google Http tranport", e); - } - - this.srcBucketName = getSourcebucket(getPathPrefix()); - - MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); - String mbeanName = MBEAN_NAME; - try { - mbs.registerMBean(this, new ObjectName(mbeanName)); } catch (Exception e) { - throw new RuntimeException("Unable to regiser JMX bean: " + mbeanName + " to JMX server. Msg: " + e.getLocalizedMessage(), e); + throw new IllegalStateException( + "Unable to create a handle to the Google Http tranport", e); } - } - - /* - * @param pathprefix - the absolute path (including bucket name) to the object. - */ - private String getSourcebucket(String pathPrefix) { - - String[] paths = pathPrefix.split(String.valueOf(S3BackupPath.PATH_SEP)); - return paths[0]; + this.srcBucketName = getShard(); } private Storage.Objects constructObjectResourceHandle() { @@ -112,38 +89,38 @@ private Storage.Objects constructObjectResourceHandle() { } constructGcsStorageHandle(); - this.objectsResoruceHandle = this.gcsStorageHandle.objects(); - return this.objectsResoruceHandle; } /* * Get a handle to the GCS api to manage our data within their storage. Code derive from * https://code.google.com/p/google-api-java-client/source/browse/storage-cmdline-sample/src/main/java/com/google/api/services/samples/storage/cmdline/StorageSample.java?repo=samples - * + * * Note: GCS storage will use our credential to do auto-refresh of expired tokens */ - private Storage constructGcsStorageHandle() { if (this.gcsStorageHandle != null) { return this.gcsStorageHandle; } try { - constructGcsCredential(); - } catch (Exception e) { throw new IllegalStateException("Exception during GCS authorization", e); } - this.gcsStorageHandle = new Storage.Builder(this.httpTransport, JSON_FACTORY, this.credential).setApplicationName(APPLICATION_NAME).build(); + this.gcsStorageHandle = + new Storage.Builder(this.httpTransport, JSON_FACTORY, this.credential) + .setApplicationName(APPLICATION_NAME) + .build(); return this.gcsStorageHandle; } - /** Authorizes the installed application to access user's protected data, code from https://developers.google.com/maps-engine/documentation/oauth/serviceaccount - * and http://javadoc.google-api-java-client.googlecode.com/hg/1.8.0-beta/com/google/api/client/googleapis/auth/oauth2/GoogleCredential.html + /** + * Authorizes the installed application to access user's protected data, code from + * https://developers.google.com/maps-engine/documentation/oauth/serviceaccount and + * http://javadoc.google-api-java-client.googlecode.com/hg/1.8.0-beta/com/google/api/client/googleapis/auth/oauth2/GoogleCredential.html */ private Credential constructGcsCredential() throws Exception { @@ -152,198 +129,135 @@ private Credential constructGcsCredential() throws Exception { } synchronized (this) { - if (this.credential == null) { - String service_acct_email = new String(this.gcsCredential.getValue(KEY.GCS_SERVICE_ID)); + String service_acct_email = + new String(this.gcsCredential.getValue(KEY.GCS_SERVICE_ID)); - if (this.config.getGcsServiceAccountPrivateKeyLoc() == null || this.config.getGcsServiceAccountPrivateKeyLoc().isEmpty()) { - throw new NullPointerException("Fast property for the the GCS private key file is null/empty."); + if (this.config.getGcsServiceAccountPrivateKeyLoc() == null + || this.config.getGcsServiceAccountPrivateKeyLoc().isEmpty()) { + 
+                throw new NullPointerException(
+                        "Fast property for the GCS private key file is null/empty.");
             }
 
-                //Take the encrypted private key, decrypted into an in-transit file which is passed to GCS
-                File gcsPrivateKeyHandle = new File(this.config.getGcsServiceAccountPrivateKeyLoc() + ".output");
+            // Take the encrypted private key and decrypt it into an in-transit file which is
+            // passed to GCS
+            File gcsPrivateKeyHandle =
+                    new File(this.config.getGcsServiceAccountPrivateKeyLoc() + ".output");
 
-                OutputStream os = new FileOutputStream(gcsPrivateKeyHandle);
-                BufferedOutputStream bos = new BufferedOutputStream(os);
             ByteArrayOutputStream byteos = new ByteArrayOutputStream();
-                byte[] gcsPrivateKeyPlainText = this.gcsCredential.getValue(KEY.GCS_PRIVATE_KEY_LOC);
-                try {
-
+            byte[] gcsPrivateKeyPlainText =
+                    this.gcsCredential.getValue(KEY.GCS_PRIVATE_KEY_LOC);
+            try (BufferedOutputStream bos =
+                    new BufferedOutputStream(new FileOutputStream(gcsPrivateKeyHandle))) {
                 byteos.write(gcsPrivateKeyPlainText);
                 byteos.writeTo(bos);
-
             } catch (IOException e) {
-
-                throw new IOException("Exception when writing decrypted gcs private key value to disk.", e);
-
-            } finally {
-                try {
-                    bos.close();
-                } catch (IOException e) {
-                    throw new IOException("Exception when closing decrypted gcs private key value to disk.", e);
-                }
+                throw new IOException(
+                        "Exception when writing decrypted gcs private key value to disk.", e);
             }
 
-                Collection scopes = new ArrayList(1);
+            Collection scopes = new ArrayList<>(1);
             scopes.add(StorageScopes.DEVSTORAGE_READ_ONLY);
 
-                this.credential = new GoogleCredential.Builder().setTransport(this.httpTransport)
-                        .setJsonFactory(JSON_FACTORY)
-                        .setServiceAccountId(service_acct_email)
-                        .setServiceAccountScopes(scopes)
-                        .setServiceAccountPrivateKeyFromP12File(gcsPrivateKeyHandle) //Cryptex decrypted service account key derive from the GCS console
-                        .build();
+            // Cryptex decrypted service account key derived from the GCS console
+            this.credential =
+                    new GoogleCredential.Builder()
+                            .setTransport(this.httpTransport)
+                            .setJsonFactory(JSON_FACTORY)
+                            .setServiceAccountId(service_acct_email)
+                            .setServiceAccountScopes(scopes)
+                            .setServiceAccountPrivateKeyFromP12File(gcsPrivateKeyHandle)
+                            .build();
         }
-        }
         return this.credential;
     }
 
     @Override
-    public void download(AbstractBackupPath path, OutputStream os) throws BackupRestoreException {
-
-        logger.info("Downloading {} from GCS bucket {}", path.getRemotePath(), this.srcBucketName);
-        this.downloadCount.incrementAndGet();
-
-        String objectName = parseObjectname(getPathPrefix());
+    protected void downloadFileImpl(AbstractBackupPath path, String suffix)
+            throws BackupRestoreException {
+        String remotePath = path.getRemotePath();
+        File localFile = new File(path.newRestoreFile().getAbsolutePath() + suffix);
+        String objectName = parseObjectname(getPrefix().toString());
+        com.google.api.services.storage.Storage.Objects.Get get;
 
-        com.google.api.services.storage.Storage.Objects.Get get = null;
         try {
-
-            get = constructObjectResourceHandle().get(this.srcBucketName, path.getRemotePath());
-
+            get = constructObjectResourceHandle().get(this.srcBucketName, remotePath);
         } catch (IOException e) {
-            throw new BackupRestoreException("IO error retrieving metadata for: " + objectName + " from bucket: " + this.srcBucketName, e);
+            throw new BackupRestoreException(
+                    "IO error retrieving metadata for: "
+                            + objectName
+                            + " from bucket: "
+                            + this.srcBucketName,
+                    e);
         }
 
-        get.getMediaHttpDownloader().setDirectDownloadEnabled(true); // If you're not using GCS' AppEngine, download the whole thing (instead of chunks) in one request, if possible.
-        InputStream is = null;
-        try {
-
-            is = get.executeMediaAsInputStream();
+        // If you're not using GCS' AppEngine, download the whole thing (instead of chunks) in one
+        // request, if possible.
+        get.getMediaHttpDownloader().setDirectDownloadEnabled(true);
+        try (OutputStream os = new FileOutputStream(localFile);
+                InputStream is = get.executeMediaAsInputStream()) {
             IOUtils.copyLarge(is, os);
-
-        } catch (IOException e) {
-            throw new BackupRestoreException("IO error during streaming of object: " + objectName + " from bucket: " + this.srcBucketName, e);
+        } catch (IOException e) {
+            throw new BackupRestoreException(
+                    "IO error during streaming of object: "
+                            + objectName
+                            + " from bucket: "
+                            + this.srcBucketName,
+                    e);
         } catch (Exception ex) {
-
-            throw new BackupRestoreException("Exception encountered when copying bytes from input to output", ex);
-
-        } finally {
-            IOUtils.closeQuietly(is);
-            IOUtils.closeQuietly(os);
+            throw new BackupRestoreException(
+                    "Exception encountered when copying bytes from input to output", ex);
         }
 
-        bytesDownloaded.addAndGet(get.getLastResponseHeaders().getContentLength());
-
+        backupMetrics.recordDownloadRate(get.getLastResponseHeaders().getContentLength());
     }
 
     @Override
-    public void download(AbstractBackupPath path, OutputStream os, String filePath) throws BackupRestoreException {
-        try {
-
-            download(path, os);
-
-        } catch (Exception e) {
-            throw new BackupRestoreException(e.getMessage(), e);
-        }
+    protected boolean doesRemoteFileExist(Path remotePath) {
+        // TODO: Implement based on GCS. Since this is only used for upload, leaving it empty
+        return false;
     }
 
     @Override
-    public void upload(AbstractBackupPath path, InputStream in)
-            throws BackupRestoreException {
-        throw new UnsupportedOperationException();
-
-    }
-
-    @Override
-    public Iterator list(String path, Date start, Date till) {
-        return new GoogleFileIterator(pathProvider, constructGcsStorageHandle(), path, start, till);
-    }
-
-    @Override
-    public Iterator listPrefixes(Date date) {
-        // TODO Auto-generated method stub
-        return null;
+    public Iterator<String> listFileSystem(String prefix, String delimiter, String marker) {
+        return new GoogleFileIterator(constructGcsStorageHandle(), prefix, null);
     }
 
     @Override
     public void cleanup() {
         // TODO Auto-generated method stub
-
-    }
-
-    @Override
-    public int getActivecount() {
-        // TODO Auto-generated method stub
-        return 0;
-
     }
 
     @Override
     public void shutdown() {
         // TODO Auto-generated method stub
-
     }
 
     @Override
-    public int downloadCount() {
-        return this.downloadCount.get();
-    }
-
-    @Override
-    public int uploadCount() {
-        // TODO Auto-generated method stub
-        return 0;
-    }
-
-    @Override
-    public long bytesUploaded() {
-        // TODO Auto-generated method stub
-        return 0;
-    }
-
-    @Override
-    public long getBytesUploaded() {
-        return 0;
+    protected long uploadFileImpl(AbstractBackupPath path, Instant target)
+            throws BackupRestoreException {
+        throw new UnsupportedOperationException();
    }
 
     @Override
-    public long getAWSSlowDownExceptionCounter() {
+    public long getFileSize(String remotePath) throws BackupRestoreException {
         return 0;
     }
 
     @Override
-    public long bytesDownloaded() {
-        return this.bytesDownloaded.get();
-    }
-
-    /**
-     * Get restore prefix which will be used to locate GVS files
-     */
-    public String getPathPrefix() {
-
-        String prefix;
-        if (StringUtils.isNotBlank(config.getRestorePrefix()))
-            prefix = config.getRestorePrefix();
-        else
-            prefix = config.getBackupPrefix();
-
-        return prefix;
+    public void deleteFiles(List<Path> remotePaths) throws BackupRestoreException {
+        // TODO: Delete implementation
     }
 
     /*
     * @param pathPrefix
     * @return objectName
     */
-    public static String parseObjectname(String pathPrefix) {
+    static String parseObjectname(String pathPrefix) {
         int offset = pathPrefix.lastIndexOf(0x2f);
         return pathPrefix.substring(offset + 1);
     }
-
-
-}
\ No newline at end of file
+}
diff --git a/priam/src/main/java/com/netflix/priam/google/GoogleEncryptedFileSystemMBean.java b/priam/src/main/java/com/netflix/priam/google/GoogleEncryptedFileSystemMBean.java
deleted file mode 100755
index a235316d3..000000000
--- a/priam/src/main/java/com/netflix/priam/google/GoogleEncryptedFileSystemMBean.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Copyright 2017 Netflix, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.netflix.priam.google;
-
-public interface GoogleEncryptedFileSystemMBean {
-
-    String MBEAN_NAME = "com.priam.google.GoogleEncryptedFileSystemMBean:name=GoogleEncryptedFileSystemMBean";
-
-    int downloadCount();
-
-    int uploadCount();
-
-    int getActivecount();
-
-    long bytesUploaded();
-
-    long bytesDownloaded();
-}
\ No newline at end of file
diff --git a/priam/src/main/java/com/netflix/priam/google/GoogleFileIterator.java b/priam/src/main/java/com/netflix/priam/google/GoogleFileIterator.java
index 851d3289e..ac06e5592 100755
--- a/priam/src/main/java/com/netflix/priam/google/GoogleFileIterator.java
+++ b/priam/src/main/java/com/netflix/priam/google/GoogleFileIterator.java
@@ -1,16 +1,14 @@
 /**
 * Copyright 2017 Netflix, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
+ *
+ * <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * <p>http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * <p>Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing permissions and
 * limitations under the License.
 */
 package com.netflix.priam.google;
@@ -18,146 +16,86 @@
 import com.google.api.services.storage.Storage;
 import com.google.api.services.storage.model.StorageObject;
 import com.google.common.collect.Lists;
-import com.google.inject.Provider;
-import com.netflix.priam.aws.S3BackupPath;
-import com.netflix.priam.backup.AbstractBackupPath;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import java.io.IOException;
-import java.util.Date;
 import java.util.Iterator;
 import java.util.List;
 
 /*
 * Represents a list of objects within Google Cloud Storage (GCS)
 */
-public class GoogleFileIterator implements Iterator<AbstractBackupPath> {
-    private static final Logger logger = LoggerFactory.getLogger(GoogleFileIterator.class);
-
-    private Date start;
-    private Date till;
-    private Iterator<AbstractBackupPath> iterator;
-    private Provider<AbstractBackupPath> pathProvider;
+public class GoogleFileIterator implements Iterator<String> {
+    private Iterator<String> iterator;
     private String bucketName;
-
+    private String prefix;
     private Storage.Objects objectsResoruceHandle = null;
     private Storage.Objects.List listObjectsSrvcHandle = null;
     private com.google.api.services.storage.model.Objects objectsContainerHandle = null;
-    private String pathWithinBucket;
-
-    private String nextPageToken;
-
-    /*
-     * @param pathProvider
-     * @param gcsStorageHandle - a means to perform operations within the destination storage.
-     * @param path - metadata about where the object exist within the destination.
-     * @param start - timeframe of object to restore
-     * @param till - timeframe of object to restore
-     */
-    public GoogleFileIterator(Provider<AbstractBackupPath> pathProvider, Storage gcsStorageHandle, String path, Date start, Date till) {
-
-        this.start = start;
-        this.till = till;
-        this.pathProvider = pathProvider;
+    public GoogleFileIterator(Storage gcsStorageHandle, String bucket, String prefix) {
         this.objectsResoruceHandle = gcsStorageHandle.objects();
+        this.bucketName = bucket;
+        this.prefix = prefix;
 
-        if (path == null) {
-            throw new NullPointerException("Path of object to fetch is null");
-        }
-
-        String[] paths = path.split(String.valueOf(S3BackupPath.PATH_SEP));
-        if (paths.length < 1) {
-            throw new IllegalStateException("Path of object to fetch is invalid. Path: " + path);
+        try { // == Get the initial page of results
+            this.iterator = createIterator();
+        } catch (Exception e) {
+            throw new RuntimeException(
+                    "Exception encountered fetching elements, msg: " + e.getLocalizedMessage(),
+                    e);
         }
+    }
 
-        this.bucketName = paths[0];
-        this.pathWithinBucket = pathProvider.get().remotePrefix(start, till, path);
-
-        logger.info("Listing objects from GCS: {}, prefix: {}", this.bucketName, this.pathWithinBucket);
-
+    private void initListing() {
         try {
-            this.listObjectsSrvcHandle = objectsResoruceHandle.list(bucketName); //== list objects within bucket
-
+            this.listObjectsSrvcHandle =
+                    objectsResoruceHandle.list(bucketName); // == list objects within bucket
+            // fetch elements within bucket that matches this prefix
+            this.listObjectsSrvcHandle.setPrefix(this.prefix);
         } catch (IOException e) {
             throw new RuntimeException("Unable to get gcslist handle to bucket: " + bucketName, e);
         }
-
-        this.listObjectsSrvcHandle.setPrefix(this.pathWithinBucket); //fetch elements within bucket that matches this prefix
-
-        try { //== Get the initial page of results
-
-            this.iterator = createIterator();
-
-        } catch (Exception e) {
-            throw new RuntimeException("Exception encountered fetching elements, msg: ." + e.getLocalizedMessage(), e);
-        }
     }
 
     /*
     * Fetch a page of results
     */
-    private Iterator<AbstractBackupPath> createIterator() throws Exception {
-        List<AbstractBackupPath> temp = Lists.newArrayList(); //a container of results
-
-        this.objectsContainerHandle = listObjectsSrvcHandle.execute(); //Sends the metadata request to the server and returns the parsed metadata response.
+    private Iterator<String> createIterator() throws Exception {
+        if (listObjectsSrvcHandle == null) initListing();
+        List<String> temp = Lists.newArrayList(); // a container of results
 
-        for (StorageObject object : this.objectsContainerHandle.getItems()) { //processing a page of results
-            String fileName = GoogleEncryptedFileSystem.parseObjectname(object.getName());
-            logger.debug("id: {}, parse file name: {}, name: {}", object.getId(), fileName, object.getName());
+        // Sends the metadata request to the server and returns the parsed metadata response.
+        this.objectsContainerHandle = listObjectsSrvcHandle.execute();
 
-            AbstractBackupPath path = pathProvider.get();
-            path.parseRemote(object.getName()); //e.g. of objectname: prod_backup/us-east-1/cass_account/113427455640312821154458202479064646083/201408250801/META/meta.json
-            logger.debug("New key {} path = {} start: {} end: {} my {}", object.getName(), path.getRemotePath(), start, till, path.getTime());
-            if ((path.getTime().after(start) && path.getTime().before(till)) || path.getTime().equals(start)) {
-                temp.add(path);
-                logger.debug("Added key {}", object.getName());
-            }
+        for (StorageObject object : this.objectsContainerHandle.getItems()) {
+            // processing a page of results
+            temp.add(object.getName());
         }
-
-        this.nextPageToken = this.objectsContainerHandle.getNextPageToken();
-
         return temp.iterator();
     }
 
     @Override
     public boolean hasNext() {
-        if (this.iterator == null)
-            return false;
-
         if (this.iterator.hasNext()) {
             return true;
         }
 
-        if (this.nextPageToken == null) { //there is no additional results
-            return false;
-        }
-
-        try {//if here, you have iterated through all elements of the previous page, now, get the next page of results
-
-            this.listObjectsSrvcHandle.setPageToken(this.nextPageToken); //get the next page of results
-            this.iterator = createIterator();
-
-        } catch (Exception e) {
-            throw new RuntimeException("Exception encountered fetching elements, see previous messages for details.", e);
-        }
+        while (this.objectsContainerHandle.getNextPageToken() != null && !iterator.hasNext())
+            try { // if here, you have iterated through all elements of the previous page, now, get
+                // the next page of results
+                this.listObjectsSrvcHandle.setPageToken(objectsContainerHandle.getNextPageToken());
+                this.iterator = createIterator();
+            } catch (Exception e) {
+                throw new RuntimeException(
+                        "Exception encountered fetching elements, see previous messages for details.",
+                        e);
+            }
         return this.iterator.hasNext();
-
     }
 
     @Override
-    public AbstractBackupPath next() {
+    public String next() {
         return iterator.next();
     }
-
-    @Override
-    public void remove() {
-        // TODO Auto-generated method stub
-
-    }
-
-}
\ No newline at end of file
+}
diff --git a/priam/src/main/java/com/netflix/priam/utils/CassandraMonitor.java b/priam/src/main/java/com/netflix/priam/health/CassandraMonitor.java
similarity index 74%
rename from priam/src/main/java/com/netflix/priam/utils/CassandraMonitor.java
rename to priam/src/main/java/com/netflix/priam/health/CassandraMonitor.java
index 9c13f3bd0..846bbed81 100644
--- a/priam/src/main/java/com/netflix/priam/utils/CassandraMonitor.java
+++ b/priam/src/main/java/com/netflix/priam/health/CassandraMonitor.java
@@ -1,5 +1,5 @@
 /*
- * Copyright 2013 Netflix, Inc.
+ * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -14,27 +14,26 @@
 * limitations under the License.
 *
 */
-package com.netflix.priam.utils;
+package com.netflix.priam.health;
 
 import com.google.inject.Inject;
 import com.google.inject.Singleton;
-import com.netflix.priam.defaultimpl.ICassandraProcess;
 import com.netflix.priam.config.IConfiguration;
-import com.netflix.priam.health.InstanceState;
+import com.netflix.priam.connection.JMXNodeTool;
+import com.netflix.priam.defaultimpl.ICassandraProcess;
 import com.netflix.priam.merics.CassMonitorMetrics;
 import com.netflix.priam.scheduler.SimpleTimer;
 import com.netflix.priam.scheduler.Task;
 import com.netflix.priam.scheduler.TaskTimer;
-import org.apache.cassandra.tools.NodeProbe;
-import org.apache.commons.io.IOUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import java.io.BufferedReader;
 import java.io.File;
 import java.io.IOException;
 import java.io.InputStreamReader;
 import java.util.concurrent.atomic.AtomicBoolean;
+import org.apache.cassandra.tools.NodeProbe;
+import org.apache.commons.io.IOUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /*
 * This task checks if the Cassandra process is running.
@@ -45,12 +44,16 @@ public class CassandraMonitor extends Task {
 
     public static final String JOBNAME = "CASS_MONITOR_THREAD";
     private static final Logger logger = LoggerFactory.getLogger(CassandraMonitor.class);
     private static final AtomicBoolean isCassandraStarted = new AtomicBoolean(false);
-    private InstanceState instanceState;
-    private ICassandraProcess cassProcess;
-    private CassMonitorMetrics cassMonitorMetrics;
+    private final InstanceState instanceState;
+    private final ICassandraProcess cassProcess;
+    private final CassMonitorMetrics cassMonitorMetrics;
 
     @Inject
-    protected CassandraMonitor(IConfiguration config, InstanceState instanceState, ICassandraProcess cassProcess, CassMonitorMetrics cassMonitorMetrics) {
+    protected CassandraMonitor(
+            IConfiguration config,
+            InstanceState instanceState,
+            ICassandraProcess cassProcess,
+            CassMonitorMetrics cassMonitorMetrics) {
         super(config);
         this.instanceState = instanceState;
         this.cassProcess = cassProcess;
@@ -59,11 +62,10 @@ protected CassandraMonitor(IConfiguration config, InstanceState instanceState, I
 
     @Override
     public void execute() throws Exception {
-        try{
+        try {
             checkRequiredDirectories();
             instanceState.setIsRequiredDirectoriesExist(true);
-        }catch (IllegalStateException e)
-        {
+        } catch (IllegalStateException e) {
             instanceState.setIsRequiredDirectoriesExist(false);
         }
 
@@ -71,22 +73,27 @@ public void execute() throws Exception {
         Process process = null;
         BufferedReader input = null;
         try {
             // This returns pid for the Cassandra process
-            // This needs to be sent as command list as "pipe" of results is not allowed. Also, do not try to change
-            // with pgrep as it has limitation of 4K command list (cassandra command can go upto 5-6 KB as cassandra lists all the libraries in command.
-            final String[] cmd = { "/bin/sh", "-c", "ps -ef |grep -v -P \"\\sgrep\\s\" | grep " + config.getCassProcessName()};
+            // This needs to be sent as command list as "pipe" of results is not allowed. Also, do
+            // not try to change with pgrep as it has a limitation of 4K command list (the
+            // cassandra command can go up to 5-6 KB as cassandra lists all the libraries in
+            // command).
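+            // The grep -v -P "\sgrep\s" stage filters this pipeline's own grep out of the
+            // ps output, so any surviving line indicates a live Cassandra process.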
+            final String[] cmd = {
+                "/bin/sh",
+                "-c",
+                "ps -ef |grep -v -P \"\\sgrep\\s\" | grep " + config.getCassProcessName()
+            };
             process = Runtime.getRuntime().exec(cmd);
             input = new BufferedReader(new InputStreamReader(process.getInputStream()));
             String line = input.readLine();
 
             if (line != null) {
-                //Setting cassandra flag to true
+                // Setting cassandra flag to true
                 instanceState.setCassandraProcessAlive(true);
                 isCassandraStarted.set(true);
                 NodeProbe bean = JMXNodeTool.instance(this.config);
                 instanceState.setIsGossipActive(bean.isGossipRunning());
                 instanceState.setIsNativeTransportActive(bean.isNativeTransportRunning());
-                instanceState.setIsThriftActive(bean.isThriftServerRunning());
             } else {
-                //Setting cassandra flag to false
+                // Setting cassandra flag to false
                 instanceState.setCassandraProcessAlive(false);
                 isCassandraStarted.set(false);
             }
@@ -101,16 +108,18 @@ public void execute() throws Exception {
                 IOUtils.closeQuietly(process.getErrorStream());
             }
 
-            if (input != null)
-                IOUtils.closeQuietly(input);
+            if (input != null) IOUtils.closeQuietly(input);
         }
 
         try {
             int rate = config.getRemediateDeadCassandraRate();
             if (rate >= 0 && !config.doesCassandraStartManually()) {
-                if (instanceState.shouldCassandraBeAlive() && !instanceState.isCassandraProcessAlive()) {
+                if (instanceState.shouldCassandraBeAlive()
+                        && !instanceState.isCassandraProcessAlive()) {
                     long msNow = System.currentTimeMillis();
-                    if (rate == 0 || ((instanceState.getLastAttemptedStartTime() + rate * 1000) < msNow)) {
+                    if (rate == 0
+                            || ((instanceState.getLastAttemptedStartTime() + rate * 1000)
+                                    < msNow)) {
                         cassMonitorMetrics.incCassAutoStart();
                         cassProcess.start(true);
                         instanceState.markLastAttemptedStartTime();
@@ -137,10 +146,13 @@ private void checkDirectory(String directory) {
 
     private void checkDirectory(File directory) {
         if (!directory.exists())
-            throw new IllegalStateException(String.format("Directory: {} does not exist", directory));
+            throw new IllegalStateException(
+                    String.format("Directory: %s does not exist", directory));
 
         if (!directory.canRead() || !directory.canWrite())
-            throw new IllegalStateException(String.format("Directory: {} does not have read/write permissions."));
+            throw new IllegalStateException(
+                    String.format(
+                            "Directory: %s does not have read/write permissions.", directory));
     }
 
     public static TaskTimer getTimer() {
@@ -156,10 +168,9 @@ public static Boolean hasCassadraStarted() {
         return isCassandraStarted.get();
     }
 
-    //Added for testing only
+    // Added for testing only
     public static void setIsCassadraStarted() {
-        //Setting cassandra flag to true
+        // Setting cassandra flag to true
         isCassandraStarted.set(true);
     }
-
 }
diff --git a/priam/src/main/java/com/netflix/priam/health/InstanceState.java b/priam/src/main/java/com/netflix/priam/health/InstanceState.java
index 13d23aa49..b9cc3c00f 100644
--- a/priam/src/main/java/com/netflix/priam/health/InstanceState.java
+++ b/priam/src/main/java/com/netflix/priam/health/InstanceState.java
@@ -21,53 +21,38 @@
 import com.netflix.priam.backup.BackupMetadata;
 import com.netflix.priam.backup.Status;
 import com.netflix.priam.utils.GsonJsonSerializer;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import java.time.LocalDateTime;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicLong;
 
 /**
- * Contains the state of the health of processed managed by Priam, and
- * maintains the isHealthy flag used for reporting discovery health check.
- *
- * Created by aagrawal on 9/19/17.
+ * Contains the state of the health of processes managed by Priam, and maintains the isHealthy
+ * flag used for reporting discovery health check.
+ *
+ * <p>Created by aagrawal on 9/19/17.
 */
 @Singleton
 public class InstanceState {
-    private static final Logger logger = LoggerFactory.getLogger(InstanceState.class);
-
-    public enum NODE_STATE {
-        JOIN, //This state to be used when Priam is joining the ring for the first time or was already assigned this token.
-        REPLACE //This state to be used when Priam replaces an instance from the token range.
-    }
-
-    //Bootstrap status
-    private final AtomicBoolean isBootstrapping = new AtomicBoolean(false);
-    private NODE_STATE nodeState;
-    private LocalDateTime bootstrapTime;
-
-    //Cassandra process status
+    // Cassandra process status
     private final AtomicBoolean isCassandraProcessAlive = new AtomicBoolean(false);
     private final AtomicBoolean shouldCassandraBeAlive = new AtomicBoolean(false);
     private final AtomicLong lastAttemptedStartTime = new AtomicLong(Long.MAX_VALUE);
     private final AtomicBoolean isGossipActive = new AtomicBoolean(false);
-    private final AtomicBoolean isThriftActive = new AtomicBoolean(false);
     private final AtomicBoolean isNativeTransportActive = new AtomicBoolean(false);
     private final AtomicBoolean isRequiredDirectoriesExist = new AtomicBoolean(false);
     private final AtomicBoolean isYmlWritten = new AtomicBoolean(false);
     private final AtomicBoolean isHealthy = new AtomicBoolean(false);
     private final AtomicBoolean isHealthyOverride = new AtomicBoolean(true);
 
-    //Backup status
+    // This is referenced when this class is serialized to a String
     private BackupMetadata backupStatus;
 
-    //Restore status
-    private RestoreStatus restoreStatus;
+    // Restore status
+    private final RestoreStatus restoreStatus;
 
     @Inject
-    InstanceState(RestoreStatus restoreStatus){
+    InstanceState(RestoreStatus restoreStatus) {
         this.restoreStatus = restoreStatus;
     }
@@ -85,15 +70,6 @@ public void setIsGossipActive(boolean isGossipActive) {
         setHealthy();
     }
 
-    public boolean isThriftActive() {
-        return isThriftActive.get();
-    }
-
-    public void setIsThriftActive(boolean isThriftActive) {
-        this.isThriftActive.set(isThriftActive);
-        setHealthy();
-    }
-
     public boolean isNativeTransportActive() {
         return isNativeTransportActive.get();
     }
@@ -146,36 +122,7 @@ public long getLastAttemptedStartTime() {
         return this.lastAttemptedStartTime.get();
     }
 
-    /* Boostrap */
-    public boolean isBootstrapping() {
-        return isBootstrapping.get();
-    }
-
-    public void setBootstrapping(boolean isBootstrapping) {
-        this.isBootstrapping.set(isBootstrapping);
-    }
-
-    public NODE_STATE getNodeState() {
-        return nodeState;
-    }
-
-    public LocalDateTime getBootstrapTime() {
-        return bootstrapTime;
-    }
-
-    public void setBootstrapTime(LocalDateTime bootstrapTime) {
-        this.bootstrapTime = bootstrapTime;
-    }
-
-    public void setNodeState(NODE_STATE nodeState) {
-        this.nodeState = nodeState;
-    }
-
     /* Backup */
-    public BackupMetadata getBackupStatus() {
-        return backupStatus;
-    }
-
     public void setBackupStatus(BackupMetadata backupMetadata) {
         this.backupStatus = backupMetadata;
     }
@@ -185,8 +132,9 @@ public RestoreStatus getRestoreStatus() {
         return restoreStatus;
     }
 
-    // A dirty way to set restore status. This is required as setting restore status implies health could change.
-    public void setRestoreStatus(Status status){
+    // A dirty way to set restore status. This is required as setting restore status implies health
+    // could change.
+    public void setRestoreStatus(Status status) {
         restoreStatus.status = status;
         setHealthy();
     }
@@ -195,20 +143,21 @@ public boolean isHealthy() {
         return isHealthy.get();
     }
 
-    private boolean isRestoring(){
-        return restoreStatus != null && restoreStatus.getStatus() != null && restoreStatus.getStatus() == Status.STARTED;
+    private boolean isRestoring() {
+        return restoreStatus != null
+                && restoreStatus.getStatus() != null
+                && restoreStatus.getStatus() == Status.STARTED;
     }
+
     private void setHealthy() {
         this.isHealthy.set(
-                isRestoring() ||
-                        (isCassandraProcessAlive() &&
-                                isRequiredDirectoriesExist() &&
-                                isGossipActive() &&
-                                isYmlWritten() &&
-                                isHealthyOverride() &&
-                                (isThriftActive() || isNativeTransportActive())
-                        )
-        );
+                isRestoring()
+                        || (isCassandraProcessAlive()
+                                && isRequiredDirectoriesExist()
+                                && isGossipActive()
+                                && isYmlWritten()
+                                && isHealthyOverride()
+                                && isNativeTransportActive()));
     }
 
     public boolean isYmlWritten() {
@@ -220,12 +169,15 @@ public void setYmlWritten(boolean yml) {
     }
 
     public static class RestoreStatus {
-        private LocalDateTime startDateRange, endDateRange; //Date range to restore from
-        private LocalDateTime executionStartTime, executionEndTime; //Start-end time of the actual restore execution
-        private String snapshotMetaFile; //Location of the snapshot meta file selected for restore.
-        private Status status; //the state of a restore. Note: this is different than the "status" of a Task.
-
-        public void resetStatus(){
+        private LocalDateTime startDateRange, endDateRange; // Date range to restore from
+        // Start-end time of the actual restore execution
+        // Note these are referenced when this class is serialized to a String.
+        private LocalDateTime executionStartTime, executionEndTime;
+        private String snapshotMetaFile; // Location of the snapshot meta file selected for restore.
+        // the state of a restore. Note: this is different than the "status" of a Task.
+        private Status status;
+
+        public void resetStatus() {
             this.snapshotMetaFile = null;
             this.status = null;
             this.startDateRange = endDateRange = null;
@@ -269,10 +221,6 @@ public LocalDateTime getExecutionStartTime() {
         return executionStartTime;
     }
 
-    public LocalDateTime getExecutionEndTime() {
-        return executionEndTime;
-    }
-
     public String getSnapshotMetaFile() {
         return snapshotMetaFile;
     }
@@ -281,4 +229,4 @@ public void setSnapshotMetaFile(String snapshotMetaFile) {
         this.snapshotMetaFile = snapshotMetaFile;
     }
     }
-}
\ No newline at end of file
+}
diff --git a/priam/src/main/java/com/netflix/priam/identity/AwsInstanceEnvIdentity.java b/priam/src/main/java/com/netflix/priam/identity/AwsInstanceEnvIdentity.java
deleted file mode 100644
index e0d12b3d3..000000000
--- a/priam/src/main/java/com/netflix/priam/identity/AwsInstanceEnvIdentity.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/**
- * Copyright 2017 Netflix, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.netflix.priam.identity;
-
-import com.google.inject.Singleton;
-import com.netflix.priam.identity.config.AWSVpcInstanceDataRetriever;
-import com.netflix.priam.identity.config.InstanceDataRetriever;
-
-/*
- * A means to determine if running instance is within classic, default vpc account, or non-default vpc account
- */
-@Singleton
-public class AwsInstanceEnvIdentity implements InstanceEnvIdentity {
-
-    private Boolean isClassic = false, isDefaultVpc = false, isNonDefaultVpc = false;
-
-    public AwsInstanceEnvIdentity() {
-        String vpcId = getVpcId();
-        if (vpcId == null || vpcId.isEmpty()) {
-            this.isClassic = true;
-        } else {
-            this.isNonDefaultVpc = true; //our instances run under a non default ("persistence_*") AWS acct
-        }
-    }
-
-    /*
-     * @return the vpc id of the running instance, null if instance is not running within vpc.
-     */
-    private String getVpcId() {
-        InstanceDataRetriever insDataRetriever = new AWSVpcInstanceDataRetriever();
-        return insDataRetriever.getVpcId();
-    }
-
-    @Override
-    public Boolean isClassic() {
-        return this.isClassic;
-    }
-
-    @Override
-    public Boolean isDefaultVpc() {
-        return this.isDefaultVpc;
-    }
-
-    @Override
-    public Boolean isNonDefaultVpc() {
-        return this.isNonDefaultVpc;
-    }
-
-}
diff --git a/priam/src/main/java/com/netflix/priam/identity/DoubleRing.java b/priam/src/main/java/com/netflix/priam/identity/DoubleRing.java
index 2271d44d4..910b0f0d6 100644
--- a/priam/src/main/java/com/netflix/priam/identity/DoubleRing.java
+++ b/priam/src/main/java/com/netflix/priam/identity/DoubleRing.java
@@ -16,86 +16,101 @@
 */
 package com.netflix.priam.identity;
 
-import com.google.common.collect.Lists;
 import com.google.inject.Inject;
 import com.netflix.priam.config.IConfiguration;
+import com.netflix.priam.identity.config.InstanceInfo;
 import com.netflix.priam.utils.ITokenManager;
-import org.apache.commons.io.IOUtils;
+import java.io.*;
+import java.util.Set;
+import java.util.stream.Collectors;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.*;
-import java.util.List;
-
-/**
- * Class providing functionality for doubling the ring
- */
+/** Class providing functionality for doubling the ring */
 public class DoubleRing {
     private static final Logger logger = LoggerFactory.getLogger(DoubleRing.class);
     private static File TMP_BACKUP_FILE;
     private final IConfiguration config;
-    private final IPriamInstanceFactory factory;
+    private final IPriamInstanceFactory<PriamInstance> factory;
     private final ITokenManager tokenManager;
+    private final InstanceInfo instanceInfo;
 
     @Inject
-    public DoubleRing(IConfiguration config, IPriamInstanceFactory factory, ITokenManager tokenManager) {
+    public DoubleRing(
+            IConfiguration config,
+            IPriamInstanceFactory<PriamInstance> factory,
+            ITokenManager tokenManager,
+            InstanceInfo instanceInfo) {
         this.config = config;
         this.factory = factory;
         this.tokenManager = tokenManager;
+        this.instanceInfo = instanceInfo;
     }
 
     /**
-     * Doubling is done by pre-calculating all slots of a double ring and
-     * registering them. When new nodes come up, they will get the unsed token
-     * assigned per token logic.
+     * Doubling is done by pre-calculating all slots of a double ring and registering them. When
+     * new nodes come up, they will get the unused token assigned per token logic.
     */
     public void doubleSlots() {
-        List local = filteredRemote(factory.getAllIds(config.getAppName()));
+        Set<PriamInstance> local = getInstancesInSameRegion();
 
         // delete all
-        for (PriamInstance data : local)
-            factory.delete(data);
+        for (PriamInstance data : local) factory.delete(data);
 
-        int hash = tokenManager.regionOffset(config.getDC());
+        int hash = tokenManager.regionOffset(instanceInfo.getRegion());
 
         // move existing slots.
         for (PriamInstance data : local) {
             int slot = (data.getId() - hash) * 2;
-            factory.create(data.getApp(), hash + slot, data.getInstanceId(), data.getHostName(), data.getHostIP(), data.getRac(), data.getVolumes(), data.getToken());
+            factory.create(
+                    data.getApp(),
+                    hash + slot,
+                    data.getInstanceId(),
+                    data.getHostName(),
+                    data.getHostIP(),
+                    data.getRac(),
+                    data.getVolumes(),
+                    data.getToken());
         }
 
         int new_ring_size = local.size() * 2;
-        for (PriamInstance data : filteredRemote(factory.getAllIds(config.getAppName()))) {
+        for (PriamInstance data : getInstancesInSameRegion()) {
             // if max then rotate.
             int currentSlot = data.getId() - hash;
-            int new_slot = currentSlot + 3 > new_ring_size ? (currentSlot + 3) - new_ring_size : currentSlot + 3;
-            String token = tokenManager.createToken(new_slot, new_ring_size, config.getDC());
-            factory.create(data.getApp(), new_slot + hash, InstanceIdentity.DUMMY_INSTANCE_ID, config.getHostname(), config.getHostIP(), data.getRac(), null, token);
+            int new_slot =
+                    currentSlot + 3 > new_ring_size
+                            ? (currentSlot + 3) - new_ring_size
+                            : currentSlot + 3;
+            String token =
+                    tokenManager.createToken(new_slot, new_ring_size, instanceInfo.getRegion());
+            factory.create(
+                    data.getApp(),
+                    new_slot + hash,
+                    InstanceIdentity.DUMMY_INSTANCE_ID,
+                    instanceInfo.getHostname(),
+                    config.usePrivateIP() ? instanceInfo.getPrivateIP() : instanceInfo.getHostIP(),
+                    data.getRac(),
+                    null,
+                    token);
         }
     }
 
     // filter other DC's
-    private List filteredRemote(List lst) {
-        List local = Lists.newArrayList();
-        for (PriamInstance data : lst)
-            if (data.getDC().equals(config.getDC()))
-                local.add(data);
-        return local;
+    private Set<PriamInstance> getInstancesInSameRegion() {
+        return factory.getAllIds(config.getAppName())
+                .stream()
+                .filter(i -> i.getDC().equals(instanceInfo.getRegion()))
+                .collect(Collectors.toSet());
     }
 
-    /**
-     * Backup the current state in case of failure
-     */
+    /** Backup the current state in case of failure */
     public void backup() throws IOException {
         // writing to the backup file.
         TMP_BACKUP_FILE = File.createTempFile("Backup-instance-data", ".dat");
-        OutputStream out = new FileOutputStream(TMP_BACKUP_FILE);
-        ObjectOutputStream stream = new ObjectOutputStream(out);
-        try {
-            stream.writeObject(filteredRemote(factory.getAllIds(config.getAppName())));
-            logger.info("Wrote the backup of the instances to: {}", TMP_BACKUP_FILE.getAbsolutePath());
-        } finally {
-            IOUtils.closeQuietly(stream);
-            IOUtils.closeQuietly(out);
+        try (ObjectOutputStream stream =
+                new ObjectOutputStream(new FileOutputStream(TMP_BACKUP_FILE))) {
+            stream.writeObject(getInstancesInSameRegion());
+            logger.info(
+                    "Wrote the backup of the instances to: {}", TMP_BACKUP_FILE.getAbsolutePath());
         }
     }
 
@@ -106,22 +121,26 @@ public void backup() throws IOException {
     * @throws ClassNotFoundException
     */
     public void restore() throws IOException, ClassNotFoundException {
-        for (PriamInstance data : filteredRemote(factory.getAllIds(config.getAppName())))
-            factory.delete(data);
+        for (PriamInstance data : getInstancesInSameRegion()) factory.delete(data);
 
         // read from the file.
-        InputStream in = new FileInputStream(TMP_BACKUP_FILE);
-        ObjectInputStream stream = new ObjectInputStream(in);
-        try {
+        try (ObjectInputStream stream =
+                new ObjectInputStream(new FileInputStream(TMP_BACKUP_FILE))) {
             @SuppressWarnings("unchecked")
-            List<PriamInstance> allInstances = (List<PriamInstance>) stream.readObject();
+            Set<PriamInstance> allInstances = (Set<PriamInstance>) stream.readObject();
             for (PriamInstance data : allInstances)
-                factory.create(data.getApp(), data.getId(), data.getInstanceId(), data.getHostName(), data.getHostIP(), data.getRac(), data.getVolumes(), data.getToken());
-            logger.info("Sucecsfully restored the Instances from the backup: {}", TMP_BACKUP_FILE.getAbsolutePath());
-        } finally {
-            IOUtils.closeQuietly(stream);
-            IOUtils.closeQuietly(in);
+                factory.create(
+                        data.getApp(),
+                        data.getId(),
+                        data.getInstanceId(),
+                        data.getHostName(),
+                        data.getHostIP(),
+                        data.getRac(),
+                        data.getVolumes(),
+                        data.getToken());
+            logger.info(
+                    "Successfully restored the Instances from the backup: {}",
+                    TMP_BACKUP_FILE.getAbsolutePath());
         }
     }
-
 }
diff --git a/priam/src/main/java/com/netflix/priam/identity/IMembership.java b/priam/src/main/java/com/netflix/priam/identity/IMembership.java
index 6cbac34be..ed6f72b18 100644
--- a/priam/src/main/java/com/netflix/priam/identity/IMembership.java
+++ b/priam/src/main/java/com/netflix/priam/identity/IMembership.java
@@ -16,15 +16,14 @@
 */
 package com.netflix.priam.identity;
 
+import com.google.common.collect.ImmutableSet;
 import com.google.inject.ImplementedBy;
 import com.netflix.priam.aws.AWSMembership;
-
 import java.util.Collection;
-import java.util.List;
 
 /**
- * Interface to manage membership meta information such as size of RAC, list of
- * nodes in RAC etc. Also perform ACL updates used in multi-regional clusters
+ * Interface to manage membership meta information such as size of RAC, list of nodes in RAC etc.
+ * Also perform ACL updates used in multi-regional clusters
 */
 @ImplementedBy(AWSMembership.class)
 public interface IMembership {
@@ -33,19 +32,17 @@ public interface IMembership {
     *
     * @return
     */
-    List getRacMembership();
+    ImmutableSet<String> getRacMembership();
 
-    /**
-     * @return Size of current RAC
-     */
+    /** @return Size of current RAC */
     int getRacMembershipSize();
 
     /**
-     * Get a list of Instances in the cross-account but current RAC
+     * Get a set of Instances in the cross-account but current RAC
     *
     * @return
     */
-    List getCrossAccountRacMembership();
+    ImmutableSet<String> getCrossAccountRacMembership();
 
     /**
     * Number of RACs
@@ -77,7 +74,7 @@ public interface IMembership {
     *
     * @return
     */
-    List listACL(int from, int to);
+    ImmutableSet<String> listACL(int from, int to);
 
     /**
     * Expand the membership size by 1.
@@ -85,4 +82,4 @@ public interface IMembership {
     * @param count
     */
     void expandRacMembership(int count);
-}
\ No newline at end of file
+}
diff --git a/priam/src/main/java/com/netflix/priam/identity/IPriamInstanceFactory.java b/priam/src/main/java/com/netflix/priam/identity/IPriamInstanceFactory.java
index fc11a603c..af1e37d80 100644
--- a/priam/src/main/java/com/netflix/priam/identity/IPriamInstanceFactory.java
+++ b/priam/src/main/java/com/netflix/priam/identity/IPriamInstanceFactory.java
@@ -16,27 +16,28 @@
 */
 package com.netflix.priam.identity;
 
+import com.google.common.collect.ImmutableSet;
 import com.google.inject.ImplementedBy;
 import com.netflix.priam.aws.SDBInstanceFactory;
-
-import java.util.List;
 import java.util.Map;
 
 /**
- * Interface for managing Cassandra instance data. Provides functionality
- * to register, update, delete or list instances from the registry
+ * Interface for managing Cassandra instance data. Provides functionality to register, update,
+ * delete or list instances from the registry
 */
 @ImplementedBy(SDBInstanceFactory.class)
-public interface IPriamInstanceFactory {
+public interface IPriamInstanceFactory<T extends PriamInstance> {
     /**
     * Return a list of all Cassandra server nodes registered.
+     *
     * @param appName the cluster name
     * @return a list of all nodes in {@code appName}
     */
-    List getAllIds(String appName);
+    ImmutableSet<T> getAllIds(String appName);
 
     /**
     * Return the Cassandra server node with the given {@code id}.
+     *
     * @param appName the cluster name
     * @param id the node id
     * @return the node with the given {@code id}, or {@code null} if none found
@@ -45,6 +46,7 @@
     /**
     * Create/Register an instance of the server with its info.
+     *
     * @param app
     * @param id
     * @param instanceID
@@ -55,31 +57,28 @@
     * @param token
     * @return the new node
     */
-    PriamInstance create(String app, int id, String instanceID, String hostname, String ip, String rac, Map volumes, String token);
+    PriamInstance create(
+            String app,
+            int id,
+            String instanceID,
+            String hostname,
+            String ip,
+            String rac,
+            Map volumes,
+            String token);
 
     /**
     * Delete the server node from the registry
+     *
     * @param inst the node to delete
     */
     void delete(PriamInstance inst);
 
     /**
     * Update the details of the server node in registry
-     * @param inst the node to update
-     */
-    void update(PriamInstance inst);
-
-    /**
-     * Sort the list by instance ID
-     * @param return_ the list of nodes to sort
-     */
-    void sort(List return_);
-
-    /**
-     * Attach volumes if required
-     * @param instance
-     * @param mountPath
-     * @param device
+     *
+     * @param orig the values that should exist in the database for the update to succeed
+     * @param inst the new values
     */
-    void attachVolumes(PriamInstance instance, String mountPath, String device);
-}
\ No newline at end of file
+    void update(PriamInstance orig, PriamInstance inst);
+}
diff --git a/priam/src/main/java/com/netflix/priam/identity/InstanceEnvIdentity.java b/priam/src/main/java/com/netflix/priam/identity/InstanceEnvIdentity.java
deleted file mode 100644
index e0441fe14..000000000
--- a/priam/src/main/java/com/netflix/priam/identity/InstanceEnvIdentity.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Copyright 2017 Netflix, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.netflix.priam.identity;
-
-import com.google.inject.ImplementedBy;
-
-/*
- * A means to determine the environment for the running instance
- */
-@ImplementedBy(AwsInstanceEnvIdentity.class)
-public interface InstanceEnvIdentity {
-    /*
-     * @return true if running instance is in "classic", false otherwise.
-     */
-    Boolean isClassic();
-
-    /*
-     * @return true if running instance is in vpc, under your default AWS account, false otherwise.
-     */
-    Boolean isDefaultVpc();
-
-    /*
-     * @return true if running instance is in vpc, under a specific AWS account, false otherwise.
-     */
-    Boolean isNonDefaultVpc();
-
-    enum InstanceEnvironent {
-        CLASSIC, DEFAULT_VPC, NONDEFAULT_VPC
-    }
-
-}
diff --git a/priam/src/main/java/com/netflix/priam/identity/InstanceIdentity.java b/priam/src/main/java/com/netflix/priam/identity/InstanceIdentity.java
index 7071e8511..458420e91 100644
--- a/priam/src/main/java/com/netflix/priam/identity/InstanceIdentity.java
+++ b/priam/src/main/java/com/netflix/priam/identity/InstanceIdentity.java
@@ -17,7 +17,6 @@
 package com.netflix.priam.identity;
 
 import com.google.common.base.Predicate;
-import com.google.common.base.Supplier;
 import com.google.common.collect.Iterables;
 import com.google.common.collect.ListMultimap;
 import com.google.common.collect.Lists;
@@ -25,259 +24,119 @@
 import com.google.inject.Inject;
 import com.google.inject.Singleton;
 import com.netflix.priam.config.IConfiguration;
-import com.netflix.priam.identity.token.IDeadTokenRetriever;
-import com.netflix.priam.identity.token.INewTokenRetriever;
-import com.netflix.priam.identity.token.IPreGeneratedTokenRetriever;
-import com.netflix.priam.utils.ITokenManager;
-import com.netflix.priam.utils.RetryableCallable;
-import com.netflix.priam.utils.Sleeper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.net.UnknownHostException;
-import java.util.Collection;
+import com.netflix.priam.identity.config.InstanceInfo;
+import com.netflix.priam.identity.token.ITokenRetriever;
 import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
 
 /**
- * This class provides the central place to create and consume the identity of
- * the instance - token, seeds etc.
- *
+ * This class provides the central place to create and consume the identity of the instance - token,
+ * seeds etc.
 */
 @Singleton
 public class InstanceIdentity {
-    private static final Logger logger = LoggerFactory.getLogger(InstanceIdentity.class);
     public static final String DUMMY_INSTANCE_ID = "new_slot";
 
-    private final ListMultimap<String, PriamInstance> locMap = Multimaps.newListMultimap(new HashMap<String, List<PriamInstance>>(), new Supplier<List<PriamInstance>>() {
-        public List<PriamInstance> get() {
-            return Lists.newArrayList();
-        }
-    });
-    private final IPriamInstanceFactory factory;
+    private final ListMultimap<String, PriamInstance> locMap =
+            Multimaps.newListMultimap(new HashMap<>(), Lists::newArrayList);
+    private final IPriamInstanceFactory<PriamInstance> factory;
     private final IMembership membership;
     private final IConfiguration config;
-    private final Sleeper sleeper;
-    private final ITokenManager tokenManager;
-
-    private final Predicate<PriamInstance> differentHostPredicate = new Predicate<PriamInstance>() {
-        @Override
-        /**
-         * This is used to provide the list of seed providers.
-         * Since 3.x backported the @see CASSANDRA-10134 we need to ensure that seed list contains all the seed(including itself) or cluster would never come up.
-         */
-        public boolean apply(PriamInstance instance) {
-            return (!instance.getInstanceId().equalsIgnoreCase(DUMMY_INSTANCE_ID));
-        }
-
-        @Override
-        public boolean test(PriamInstance input) {
-            return apply(input);
-        }
-    };
+    private final Predicate<PriamInstance> differentHostPredicate =
+            new Predicate<PriamInstance>() {
+                @Override
+                public boolean apply(PriamInstance instance) {
+                    if (config.getAutoBoostrap()) {
+                        // auto_bootstrap = true indicates that the cluster is up and running
+                        // normally, in such a case
+                        // we cannot provide the local instance as a seed otherwise we can bootstrap
+                        // nodes with no data
+                        return (!instance.getInstanceId().equalsIgnoreCase(DUMMY_INSTANCE_ID)
+                                && !instance.getHostName().equals(myInstance.getHostName()));
+                    } else {
+                        // auto_bootstrap = false indicates a freshly provisioned cluster. Some
+                        // nodes in such a cluster must
+                        // provide themselves as seeds due to the changes in CASSANDRA-10134 which
+                        // made it so the cluster would
+                        // not start up when auto_bootstrap was false. This is because in 3.11
+                        // failing the shadow round
+                        // (which will happen on bootup by definition) is acceptable for seeds, but
+                        // not for non seeds
+                        return (!instance.getInstanceId().equalsIgnoreCase(DUMMY_INSTANCE_ID));
+                    }
+                }
+            };
 
     private PriamInstance myInstance;
-    private boolean isReplace = false;
-    private boolean isTokenPregenerated = false;
-    private String replacedIp = "";
-    private IDeadTokenRetriever deadTokenRetriever;
-    private IPreGeneratedTokenRetriever preGeneratedTokenRetriever;
-    private INewTokenRetriever newTokenRetriever;
+    // Instance information contains other information like ASG/vpc-id etc.
+    private InstanceInfo myInstanceInfo;
+    private boolean isReplace;
+    private boolean isTokenPregenerated;
+    private String replacedIp;
 
     @Inject
-    //Note: do not parameterized the generic type variable to an implementation as it confuses Guice in the binding.
-    public InstanceIdentity(IPriamInstanceFactory factory, IMembership membership, IConfiguration config,
-                            Sleeper sleeper, ITokenManager tokenManager
-            , IDeadTokenRetriever deadTokenRetriever
-            , IPreGeneratedTokenRetriever preGeneratedTokenRetriever
-            , INewTokenRetriever newTokenRetriever
-    ) throws Exception {
+    // Note: do not parameterize the generic type variable to an implementation as it confuses
+    // Guice in the binding.
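+    // The injected ITokenRetriever covers the dead-token, pre-generated, and new-token cases;
+    // it supplies the instance plus the replaced IP (if any) and the pregenerated flag.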
+ public InstanceIdentity( + IPriamInstanceFactory factory, + IMembership membership, + IConfiguration config, + InstanceInfo instanceInfo, + ITokenRetriever tokenRetriever) + throws Exception { this.factory = factory; this.membership = membership; this.config = config; - this.sleeper = sleeper; - this.tokenManager = tokenManager; - this.deadTokenRetriever = deadTokenRetriever; - this.preGeneratedTokenRetriever = preGeneratedTokenRetriever; - this.newTokenRetriever = newTokenRetriever; - init(); + this.myInstanceInfo = instanceInfo; + this.myInstance = tokenRetriever.get(); + this.replacedIp = tokenRetriever.getReplacedIp().orElse(null); + this.isReplace = replacedIp != null; + this.isTokenPregenerated = tokenRetriever.isTokenPregenerated(); } public PriamInstance getInstance() { return myInstance; } - public void init() throws Exception { - // try to grab the token which was already assigned - myInstance = new RetryableCallable() { - @Override - public PriamInstance retriableCall() throws Exception { - // Check if this node is decomissioned - List deadInstances = factory.getAllIds(config.getAppName() + "-dead"); - for (PriamInstance ins : deadInstances) { - logger.info("[Dead] Iterating though the hosts: {}", ins.getInstanceId()); - if (ins.getInstanceId().equals(config.getInstanceName())) { - ins.setOutOfService(true); - logger.info("[Dead] found that this node is dead." - + " application: {}" - + ", id: {}" - + ", instance: {}" - + ", region: {}" - + ", host ip: {}" - + ", host name: {}" - + ", token: {}", - ins.getApp(), ins.getId(), ins.getInstanceId(), - ins.getDC(), ins.getHostIP(), ins.getHostName(), - ins.getToken()); - return ins; - } - } - List aliveInstances = factory.getAllIds(config.getAppName()); - for (PriamInstance ins : aliveInstances) { - logger.info("[Alive] Iterating though the hosts: {} My id = [{}]", ins.getInstanceId(), ins.getId()); - if (ins.getInstanceId().equals(config.getInstanceName())) { - logger.info("[Alive] found that this node is alive." 
- + " application: {}" - + ", id: {}" - + ", instance: {}" - + ", region: {}" - + ", host ip: {}" - + ", host name: {}" - + ", token: {}", - ins.getApp(), ins.getId(), ins.getInstanceId(), - ins.getDC(), ins.getHostIP(), ins.getHostName(), - ins.getToken()); - return ins; - } - - } - return null; - } - }.call(); - - // Grab a dead token - if (null == myInstance) { - myInstance = new RetryableCallable() { - - @Override - public PriamInstance retriableCall() throws Exception { - PriamInstance result = null; - result = deadTokenRetriever.get(); - if (result != null) { - - isReplace = true; //indicate that we are acquiring a dead instance's token - - if (deadTokenRetriever.getReplaceIp() != null) { //The IP address of the dead instance to which we will acquire its token - replacedIp = deadTokenRetriever.getReplaceIp(); - } - - } - - return result; - } - - @Override - public void forEachExecution() { - populateRacMap(); - deadTokenRetriever.setLocMap(locMap); - } - - }.call(); - } - - - // Grab a pre-generated token if there is such one - if (null == myInstance) { - - myInstance = new RetryableCallable() { - - @Override - public PriamInstance retriableCall() throws Exception { - PriamInstance result = null; - result = preGeneratedTokenRetriever.get(); - if (result != null) { - isTokenPregenerated = true; - } - return result; - } - - @Override - public void forEachExecution() { - populateRacMap(); - preGeneratedTokenRetriever.setLocMap(locMap); - } - - }.call(); - - } - - - // Grab a new token - if (null == myInstance) { - - if (this.config.isCreateNewTokenEnable()) { - - myInstance = new RetryableCallable() { - - @Override - public PriamInstance retriableCall() throws Exception { - super.set(100, 100); - newTokenRetriever.setLocMap(locMap); - return newTokenRetriever.get(); - } - - @Override - public void forEachExecution() { - populateRacMap(); - newTokenRetriever.setLocMap(locMap); - } - - }.call(); - - } else { - throw new IllegalStateException("Node attempted to erroneously create a new token when we should be grabbing an existing token."); - } - - } - - logger.info("My token: {}", myInstance.getToken()); + public InstanceInfo getInstanceInfo() { + return myInstanceInfo; } private void populateRacMap() { locMap.clear(); - List instances = factory.getAllIds(config.getAppName()); - for (PriamInstance ins : instances) { - locMap.put(ins.getRac(), ins); - } + factory.getAllIds(config.getAppName()).forEach(ins -> locMap.put(ins.getRac(), ins)); } - public List getSeeds() throws UnknownHostException { + public List getSeeds() { populateRacMap(); - List seeds = new LinkedList(); + List seeds = new LinkedList<>(); // Handle single zone deployment if (config.getRacs().size() == 1) { // Return empty list if all nodes are not up if (membership.getRacMembershipSize() != locMap.get(myInstance.getRac()).size()) return seeds; // If seed node, return the next node in the list - if (locMap.get(myInstance.getRac()).size() > 1 && locMap.get(myInstance.getRac()).get(0).getHostIP().equals(myInstance.getHostIP())) { + if (locMap.get(myInstance.getRac()).size() > 1 + && locMap.get(myInstance.getRac()) + .get(0) + .getHostIP() + .equals(myInstance.getHostIP())) { PriamInstance instance = locMap.get(myInstance.getRac()).get(1); if (instance != null && !isInstanceDummy(instance)) { - if (config.isMultiDC()) - seeds.add(instance.getHostIP()); - else - seeds.add(instance.getHostName()); + if (config.isMultiDC()) seeds.add(instance.getHostIP()); + else seeds.add(instance.getHostName()); } } } for (String loc : 
locMap.keySet()) { - PriamInstance instance = Iterables.tryFind(locMap.get(loc), differentHostPredicate).orNull(); + PriamInstance instance = + Iterables.tryFind(locMap.get(loc), differentHostPredicate).orNull(); if (instance != null && !isInstanceDummy(instance)) { - if (config.isMultiDC()) - seeds.add(instance.getHostIP()); - else - seeds.add(instance.getHostName()); + if (config.isMultiDC()) seeds.add(instance.getHostIP()); + else seeds.add(instance.getHostName()); } } return seeds; @@ -301,7 +160,12 @@ public String getReplacedIp() { return replacedIp; } + public void setReplacedIp(String replacedIp) { + this.replacedIp = replacedIp; + if (!replacedIp.isEmpty()) this.isReplace = true; + } + private static boolean isInstanceDummy(PriamInstance instance) { return instance.getInstanceId().equals(DUMMY_INSTANCE_ID); } -} \ No newline at end of file +} diff --git a/priam/src/main/java/com/netflix/priam/identity/PriamInstance.java b/priam/src/main/java/com/netflix/priam/identity/PriamInstance.java index 90d8d2287..5c5b7eb54 100644 --- a/priam/src/main/java/com/netflix/priam/identity/PriamInstance.java +++ b/priam/src/main/java/com/netflix/priam/identity/PriamInstance.java @@ -16,11 +16,10 @@ */ package com.netflix.priam.identity; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.io.Serializable; import java.util.Map; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class PriamInstance implements Serializable { private static final long serialVersionUID = 5606412386974488659L; @@ -36,7 +35,7 @@ public class PriamInstance implements Serializable { private String publicip; private String location; private String token; - //Handles Storage objects + // Handles Storage objects private Map volumes; public String getApp() { @@ -110,8 +109,9 @@ public void setVolumes(Map volumes) { @Override public String toString() { - return String.format("Hostname: %s, InstanceId: %s, APP_NAME: %s, RAC : %s Location %s, Id: %s: Token: %s", getHostName(), getInstanceId(), getApp(), getRac(), getDC(), getId(), - getToken()); + return String.format( + "Hostname: %s, InstanceId: %s, APP_NAME: %s, RAC : %s, Location %s, Id: %s: Token: %s, IP: %s", + hostname, instanceId, app, availabilityZone, location, Id, token, publicip); } public String getDC() { @@ -134,9 +134,8 @@ public boolean isOutOfService() { return outOfService; } - public void setOutOfService(boolean outOfService) { - this.outOfService = outOfService; + public PriamInstance setOutOfService() { + this.outOfService = true; + return this; } - - -} \ No newline at end of file +} diff --git a/priam/src/main/java/com/netflix/priam/identity/config/AWSInstanceInfo.java b/priam/src/main/java/com/netflix/priam/identity/config/AWSInstanceInfo.java new file mode 100644 index 000000000..0a74522da --- /dev/null +++ b/priam/src/main/java/com/netflix/priam/identity/config/AWSInstanceInfo.java @@ -0,0 +1,208 @@ +/** + * Copyright 2017 Netflix, Inc. + * + *
<p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * <p>http://www.apache.org/licenses/LICENSE-2.0 + * + * <p>
Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.priam.identity.config; + +import com.amazonaws.services.ec2.AmazonEC2; +import com.amazonaws.services.ec2.AmazonEC2ClientBuilder; +import com.amazonaws.services.ec2.model.*; +import com.amazonaws.util.EC2MetadataUtils; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; +import com.google.inject.Inject; +import com.google.inject.Singleton; +import com.netflix.priam.cred.ICredential; +import com.netflix.priam.utils.RetryableCallable; +import java.util.List; +import org.apache.commons.lang3.StringUtils; +import org.codehaus.jettison.json.JSONObject; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@Singleton +public class AWSInstanceInfo implements InstanceInfo { + private static final Logger logger = LoggerFactory.getLogger(AWSInstanceInfo.class); + + static final String PUBLIC_HOSTNAME_URL = "/latest/meta-data/public-hostname"; + static final String LOCAL_HOSTNAME_URL = "/latest/meta-data/local-hostname"; + static final String PUBLIC_HOSTIP_URL = "/latest/meta-data/public-ipv4"; + static final String LOCAL_HOSTIP_URL = "/latest/meta-data/local-ipv4"; + + private JSONObject identityDocument = null; + private String privateIp; + private String hostIP; + private String rac; + private String hostName; + private String instanceId; + private String instanceType; + private String mac; + private String region; + private String availabilityZone; + private ICredential credential; + private String vpcId; + private InstanceEnvironment instanceEnvironment; + + @Inject + public AWSInstanceInfo(ICredential credential) { + this.credential = credential; + } + + @Override + public String getPrivateIP() { + if (privateIp == null) { + privateIp = EC2MetadataUtils.getPrivateIpAddress(); + } + return privateIp; + } + + @Override + public String getRac() { + if (rac == null) { + rac = EC2MetadataUtils.getAvailabilityZone(); + } + return rac; + } + + @Override + public List getDefaultRacks() { + // Get the fist 3 available zones in the region + AmazonEC2 client = + AmazonEC2ClientBuilder.standard() + .withCredentials(credential.getAwsCredentialProvider()) + .withRegion(getRegion()) + .build(); + DescribeAvailabilityZonesResult res = client.describeAvailabilityZones(); + List zone = Lists.newArrayList(); + for (AvailabilityZone reg : res.getAvailabilityZones()) { + if (reg.getState().equals("available")) zone.add(reg.getZoneName()); + if (zone.size() == 3) break; + } + return ImmutableList.copyOf(zone); + } + + @Override + public String getInstanceId() { + if (instanceId == null) { + instanceId = EC2MetadataUtils.getInstanceId(); + } + return instanceId; + } + + @Override + public String getInstanceType() { + if (instanceType == null) { + instanceType = EC2MetadataUtils.getInstanceType(); + } + return instanceType; + } + + private String getMac() { + if (mac == null) { + mac = EC2MetadataUtils.getNetworkInterfaces().get(0).getMacAddress(); + } + return mac; + } + + @Override + public String getRegion() { + if (region == null) { + region = EC2MetadataUtils.getEC2InstanceRegion(); + } + return region; + } + + @Override + public String getVpcId() { + String nacId = getMac(); + if (StringUtils.isEmpty(nacId)) return null; + + if 
(vpcId == null) + try { + vpcId = EC2MetadataUtils.getNetworkInterfaces().get(0).getVpcId(); + } catch (Exception e) { + logger.info( + "Vpc id does not exist for running instance, not fatal as running instance maybe not be in vpc. Msg: {}", + e.getLocalizedMessage()); + } + + return vpcId; + } + + @Override + public String getAutoScalingGroup() { + final AmazonEC2 client = + AmazonEC2ClientBuilder.standard() + .withCredentials(credential.getAwsCredentialProvider()) + .withRegion(getRegion()) + .build(); + try { + return new RetryableCallable(15, 30000) { + public String retriableCall() throws IllegalStateException { + DescribeInstancesRequest desc = + new DescribeInstancesRequest().withInstanceIds(getInstanceId()); + DescribeInstancesResult res = client.describeInstances(desc); + + for (Reservation resr : res.getReservations()) { + for (Instance ins : resr.getInstances()) { + for (com.amazonaws.services.ec2.model.Tag tag : ins.getTags()) { + if (tag.getKey().equals("aws:autoscaling:groupName")) + return tag.getValue(); + } + } + } + + throw new IllegalStateException("Couldn't determine ASG name"); + } + }.call(); + } catch (Exception e) { + logger.error("Failed to determine ASG name.", e); + return null; + } + } + + @Override + public InstanceEnvironment getInstanceEnvironment() { + if (instanceEnvironment == null) { + instanceEnvironment = + (getVpcId() == null) ? InstanceEnvironment.CLASSIC : InstanceEnvironment.VPC; + } + return instanceEnvironment; + } + + @Override + public String getHostname() { + if (hostName == null) { + String publicHostName = tryGetDataFromUrl(PUBLIC_HOSTNAME_URL); + hostName = + publicHostName == null ? tryGetDataFromUrl(LOCAL_HOSTNAME_URL) : publicHostName; + } + return hostName; + } + + @Override + public String getHostIP() { + if (hostIP == null) { + String publicHostIP = tryGetDataFromUrl(PUBLIC_HOSTIP_URL); + hostIP = publicHostIP == null ? tryGetDataFromUrl(LOCAL_HOSTIP_URL) : publicHostIP; + } + return hostIP; + } + + String tryGetDataFromUrl(String url) { + try { + return EC2MetadataUtils.getData(url); + } catch (Exception e) { + return null; + } + } +} diff --git a/priam/src/main/java/com/netflix/priam/identity/config/AWSVpcInstanceDataRetriever.java b/priam/src/main/java/com/netflix/priam/identity/config/AWSVpcInstanceDataRetriever.java deleted file mode 100644 index f4255b217..000000000 --- a/priam/src/main/java/com/netflix/priam/identity/config/AWSVpcInstanceDataRetriever.java +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Copyright 2017 Netflix, Inc. - *
- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.netflix.priam.identity.config; - -import com.netflix.priam.utils.SystemUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Calls AWS metadata to get info on the location of the running instance within vpc environment. - * - */ -public class AWSVpcInstanceDataRetriever extends InstanceDataRetrieverBase{ - private static final Logger logger = LoggerFactory.getLogger(AWSVpcInstanceDataRetriever.class); - @Override - public String getVpcId() { - String nacId = getMac(); - if (nacId == null || nacId.isEmpty()) - return null; - - String vpcId = null; - try { - vpcId = SystemUtils.getDataFromUrl("http://169.254.169.254/latest/meta-data/network/interfaces/macs/" + nacId + "vpc-id").trim(); - } catch (Exception e) { - logger.info("Vpc id does not exist for running instance, not fatal as running instance maybe not be in vpc. Msg: {}", e.getLocalizedMessage()); - } - - return vpcId; - } - -} \ No newline at end of file diff --git a/priam/src/main/java/com/netflix/priam/identity/config/AwsClassicInstanceDataRetriever.java b/priam/src/main/java/com/netflix/priam/identity/config/AwsClassicInstanceDataRetriever.java deleted file mode 100644 index e975c2830..000000000 --- a/priam/src/main/java/com/netflix/priam/identity/config/AwsClassicInstanceDataRetriever.java +++ /dev/null @@ -1,29 +0,0 @@ -/** - * Copyright 2017 Netflix, Inc. - *
- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.netflix.priam.identity.config; - -/** - * Calls AWS metadata to get info on the location of the running instance within classic environment. - * - */ - -public class AwsClassicInstanceDataRetriever extends InstanceDataRetrieverBase { - @Override - public String getVpcId() { - throw new UnsupportedOperationException("Not applicable as running instance is in classic environment"); - } - -} \ No newline at end of file diff --git a/priam/src/main/java/com/netflix/priam/identity/config/InstanceDataRetriever.java b/priam/src/main/java/com/netflix/priam/identity/config/InstanceDataRetriever.java deleted file mode 100644 index 6f2f68204..000000000 --- a/priam/src/main/java/com/netflix/priam/identity/config/InstanceDataRetriever.java +++ /dev/null @@ -1,92 +0,0 @@ -/** - * Copyright 2017 Netflix, Inc. - *
- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.netflix.priam.identity.config; - -import org.codehaus.jettison.json.JSONException; - -/** - * A means to fetch meta data of running instance - */ -public interface InstanceDataRetriever { - /** - * Get the availability zone of the running instance. - * @return the availability zone of the running instance. e.g. us-east-1c - */ - String getRac(); - - /** - * Get the public hostname for the running instance. - * @return the public hostname for the running instance. e.g.: ec2-12-34-56-78.compute-1.amazonaws.com - */ - String getPublicHostname(); - - /** - * Get public ip address for running instance. Can be null. - * @return private ip address for running instance. - */ - String getPublicIP(); - - /** - * Get private ip address for running instance. - * @return private ip address for running instance. - */ - String getPrivateIP(); - - /** - * Get the instance id of the running instance. - * @return the instance id of the running instance. e.g. i-07a88a49ff155353 - */ - String getInstanceId(); - - /** - * Get the instance type of the running instance. - * @return the instance type of the running instance. e.g. i3.2xlarge - */ - String getInstanceType(); - - /** - * Get the id of the network interface for running instance. - * @return id of the network interface for running instance - */ - String getMac(); - - /** - * Get the id of the vpc account for running instance. - * @return the id of the vpc account for running instance, null if does not exist. - */ - String getVpcId(); //the id of the vpc for running instance - - /** - * AWS Account ID of running instance. - * @return the id (e.g. 12345) of the AWS account of running instance, could be null /empty. - * @throws JSONException - */ - String getAWSAccountId() throws JSONException; - - /** - * Get the region of the AWS account of running instance - * @return the region (e.g. us-east-1) of the AWS account of running instance, could be null /empty. - * @throws JSONException - */ - String getRegion() throws JSONException; - - /** - * Get the availability zone of the running instance. - * @return the availability zone of the running instance. e.g. us-east-1c - * @throws JSONException - */ - String getAvailabilityZone() throws JSONException; -} diff --git a/priam/src/main/java/com/netflix/priam/identity/config/InstanceDataRetrieverBase.java b/priam/src/main/java/com/netflix/priam/identity/config/InstanceDataRetrieverBase.java deleted file mode 100644 index 2b9598a40..000000000 --- a/priam/src/main/java/com/netflix/priam/identity/config/InstanceDataRetrieverBase.java +++ /dev/null @@ -1,72 +0,0 @@ -/** - * Copyright 2017 Netflix, Inc. - *
- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.netflix.priam.identity.config; - -import com.netflix.priam.utils.SystemUtils; -import org.codehaus.jettison.json.JSONException; -import org.codehaus.jettison.json.JSONObject; - -public abstract class InstanceDataRetrieverBase implements InstanceDataRetriever{ - protected JSONObject identityDocument = null; - - public String getPrivateIP(){ - return SystemUtils.getDataFromUrl("http://169.254.169.254/latest/meta-data/local-ipv4"); - } - - public String getRac() { - return SystemUtils.getDataFromUrl("http://169.254.169.254/latest/meta-data/placement/availability-zone"); - } - - public String getPublicHostname() { - return SystemUtils.getDataFromUrl("http://169.254.169.254/latest/meta-data/public-hostname"); - } - - public String getPublicIP() { - return SystemUtils.getDataFromUrl("http://169.254.169.254/latest/meta-data/public-ipv4"); - } - - public String getInstanceId() { - return SystemUtils.getDataFromUrl("http://169.254.169.254/latest/meta-data/instance-id"); - } - - public String getInstanceType() { - return SystemUtils.getDataFromUrl("http://169.254.169.254/latest/meta-data/instance-type"); - } - - public String getMac() { - return SystemUtils.getDataFromUrl("http://169.254.169.254/latest/meta-data/network/interfaces/macs/").trim(); - } - - public String getAWSAccountId() throws JSONException { - if (this.identityDocument == null) { - String jsonStr = SystemUtils.getDataFromUrl("http://169.254.169.254/latest/dynamic/instance-identity/document"); - this.identityDocument = new JSONObject(jsonStr); - } - return this.identityDocument.getString("accountId"); - } - - public String getRegion() throws JSONException { - if (this.identityDocument == null) { - String jsonStr = SystemUtils.getDataFromUrl("http://169.254.169.254/latest/dynamic/instance-identity/document"); - this.identityDocument = new JSONObject(jsonStr); - } - return this.identityDocument.getString("region"); - } - - public String getAvailabilityZone() throws JSONException { - return SystemUtils.getDataFromUrl("http://169.254.169.254/latest/meta-data/placement/availability-zone"); - } -} diff --git a/priam/src/main/java/com/netflix/priam/identity/config/InstanceInfo.java b/priam/src/main/java/com/netflix/priam/identity/config/InstanceInfo.java new file mode 100644 index 000000000..f368e6c4b --- /dev/null +++ b/priam/src/main/java/com/netflix/priam/identity/config/InstanceInfo.java @@ -0,0 +1,112 @@ +/** + * Copyright 2017 Netflix, Inc. + * + *
<p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * <p>http://www.apache.org/licenses/LICENSE-2.0 + * + * <p>
Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.priam.identity.config; + +import com.google.common.collect.ImmutableList; +import com.google.inject.ImplementedBy; +import com.netflix.priam.config.IConfiguration; +import java.util.List; + +/** A means to fetch meta data of running instance */ +@ImplementedBy(AWSInstanceInfo.class) +public interface InstanceInfo { + /** + * Get the availability zone of the running instance. + * + * @return the availability zone of the running instance. e.g. us-east-1c + */ + String getRac(); + + /** + * Get the list of default racks available for this DC. This is used if no value is configured + * for {@link IConfiguration#getRacs()} + * + * @return list of default racks. + */ + default List getDefaultRacks() { + return ImmutableList.of(getRac()); + } + + /** + * Get the hostname for the running instance. Cannot be null. + * + * @return the public hostname for the running instance. e.g.: + * ec2-12-34-56-78.compute-1.amazonaws.com, if available. Else return private ip address for + * running instance. + */ + String getHostname(); + + /** + * Get ip address for running instance. Cannot be null. + * + * @return public ip if one is provided or private ip address for running instance. + */ + String getHostIP(); + + /** + * Get private ip address for running instance. + * + * @return private ip address for running instance. + */ + String getPrivateIP(); + + /** + * Get the instance id of the running instance. + * + * @return the instance id of the running instance. e.g. i-07a88a49ff155353 + */ + String getInstanceId(); + + /** + * Get the instance type of the running instance. + * + * @return the instance type of the running instance. e.g. i3.2xlarge + */ + String getInstanceType(); + + /** + * Get the id of the vpc account for running instance. + * + * @return the id of the vpc account for running instance, null if does not exist. + */ + String getVpcId(); // the id of the vpc for running instance + + /** + * Get the region/data center of the running instance + * + * @return the region of the running instance, could be null/empty (e.g. us-east-1) + */ + String getRegion(); + + /** + * Get the ASG in which this instance is deployed. Note that Priam requires instances to be + * under an ASG. + * + * @return the ASG of the instance. ex: cassandra_app--useast1e + */ + String getAutoScalingGroup(); + + /** + * Environment of the current running instance. AWS only allows VPC environment (default). + * Classic is deprecated environment by AWS. + * + * @return Environment of the current running instance. + */ + InstanceEnvironment getInstanceEnvironment(); + + enum InstanceEnvironment { + CLASSIC, + VPC + } +} diff --git a/priam/src/main/java/com/netflix/priam/identity/config/LocalInstanceDataRetriever.java b/priam/src/main/java/com/netflix/priam/identity/config/LocalInstanceDataRetriever.java deleted file mode 100644 index 028e655a7..000000000 --- a/priam/src/main/java/com/netflix/priam/identity/config/LocalInstanceDataRetriever.java +++ /dev/null @@ -1,76 +0,0 @@ -/** - * Copyright 2017 Netflix, Inc. - *
- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.netflix.priam.identity.config; - -import org.codehaus.jettison.json.JSONException; - -/** - * Looks at local (system) properties for metadata about the running 'instance'. - * Typically, this is used for locally-deployed testing. - */ -public class LocalInstanceDataRetriever implements InstanceDataRetriever { - private static final String PREFIX = "Priam.localInstance."; - - public String getRac() { - return System.getProperty(PREFIX + "availabilityZone", ""); - } - - public String getPublicHostname() { - return System.getProperty(PREFIX + "publicHostname", ""); - } - - public String getPublicIP() { - return System.getProperty(PREFIX + "publicIp", ""); - } - - @Override - public String getPrivateIP() { - return System.getProperty(PREFIX + "privateIp", ""); - } - - public String getInstanceId() { - return System.getProperty(PREFIX + "instanceId", ""); - } - - public String getInstanceType() { - return System.getProperty(PREFIX + "instanceType", ""); - } - - @Override - public String getMac() { - return System.getProperty(PREFIX + "networkinterface", ""); - } - - @Override - public String getVpcId() { - return System.getProperty(PREFIX + "vpcid", ""); - } - - @Override - public String getAWSAccountId() throws JSONException { - return System.getProperty(PREFIX + "awsacctid", ""); - } - - @Override - public String getAvailabilityZone() throws JSONException { - return System.getProperty(PREFIX + "availabilityzone", ""); - } - - @Override - public String getRegion() throws JSONException { - return System.getProperty(PREFIX + "region", ""); - } -} \ No newline at end of file diff --git a/priam/src/main/java/com/netflix/priam/identity/config/LocalInstanceInfo.java b/priam/src/main/java/com/netflix/priam/identity/config/LocalInstanceInfo.java new file mode 100644 index 000000000..be89bb918 --- /dev/null +++ b/priam/src/main/java/com/netflix/priam/identity/config/LocalInstanceInfo.java @@ -0,0 +1,72 @@ +/** + * Copyright 2017 Netflix, Inc. + * + *
<p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * <p>http://www.apache.org/licenses/LICENSE-2.0 + * + * <p>
Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.priam.identity.config; + +/** + * Looks at local (system) properties for metadata about the running 'instance'. Typically, this is + * used for locally-deployed testing. + */ +public class LocalInstanceInfo implements InstanceInfo { + private static final String PREFIX = "Priam.localInstance."; + + @Override + public String getRac() { + return System.getProperty(PREFIX + "availabilityZone", ""); + } + + @Override + public String getHostname() { + return System.getProperty(PREFIX + "privateIp", ""); + } + + @Override + public String getHostIP() { + return System.getProperty(PREFIX + "privateIp", ""); + } + + @Override + public String getPrivateIP() { + return System.getProperty(PREFIX + "privateIp", ""); + } + + @Override + public String getInstanceId() { + return System.getProperty(PREFIX + "instanceId", ""); + } + + @Override + public String getInstanceType() { + return System.getProperty(PREFIX + "instanceType", ""); + } + + @Override + public String getVpcId() { + return System.getProperty(PREFIX + "vpcid", ""); + } + + @Override + public String getAutoScalingGroup() { + return System.getProperty(PREFIX + "asg", ""); + } + + @Override + public InstanceEnvironment getInstanceEnvironment() { + return (getVpcId() == null) ? InstanceEnvironment.CLASSIC : InstanceEnvironment.VPC; + } + + @Override + public String getRegion() { + return System.getProperty(PREFIX + "region", ""); + } +} diff --git a/priam/src/main/java/com/netflix/priam/identity/token/DeadTokenRetriever.java b/priam/src/main/java/com/netflix/priam/identity/token/DeadTokenRetriever.java deleted file mode 100755 index 75bf4b730..000000000 --- a/priam/src/main/java/com/netflix/priam/identity/token/DeadTokenRetriever.java +++ /dev/null @@ -1,216 +0,0 @@ -/** - * Copyright 2017 Netflix, Inc. - *
- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.netflix.priam.identity.token; - -import com.google.common.collect.ListMultimap; -import com.google.inject.Inject; -import com.netflix.priam.config.IConfiguration; -import com.netflix.priam.identity.IMembership; -import com.netflix.priam.identity.IPriamInstanceFactory; -import com.netflix.priam.identity.InstanceEnvIdentity; -import com.netflix.priam.identity.PriamInstance; -import com.netflix.priam.utils.Sleeper; -import com.sun.jersey.api.client.Client; -import com.sun.jersey.api.client.ClientResponse; -import com.sun.jersey.api.client.WebResource; -import com.sun.jersey.api.client.config.ClientConfig; -import com.sun.jersey.api.client.config.DefaultClientConfig; -import org.apache.commons.lang3.StringUtils; -import org.json.simple.JSONObject; -import org.json.simple.parser.JSONParser; -import org.json.simple.parser.ParseException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.ws.rs.core.MediaType; -import java.util.Arrays; -import java.util.Iterator; -import java.util.List; -import java.util.Random; - -public class DeadTokenRetriever extends TokenRetrieverBase implements IDeadTokenRetriever { - private static final Logger logger = LoggerFactory.getLogger(DeadTokenRetriever.class); - private IPriamInstanceFactory factory; - private IMembership membership; - private IConfiguration config; - private Sleeper sleeper; - private String replacedIp; //The IP address of the dead instance to which we will acquire its token - private ListMultimap locMap; - private InstanceEnvIdentity insEnvIdentity; - - - @Inject - public DeadTokenRetriever(IPriamInstanceFactory factory, IMembership membership, IConfiguration config, - Sleeper sleeper, InstanceEnvIdentity insEnvIdentity) { - this.factory = factory; - this.membership = membership; - this.config = config; - this.sleeper = sleeper; - this.insEnvIdentity = insEnvIdentity; - } - - private List getDualAccountRacMembership(List asgInstances) { - logger.info("Dual Account cluster"); - - List crossAccountAsgInstances = membership.getCrossAccountRacMembership(); - - if (logger.isInfoEnabled()) { - if (insEnvIdentity.isClassic()) { - logger.info("EC2 classic instances (local ASG): " + Arrays.toString(asgInstances.toArray())); - logger.info("VPC Account (cross-account ASG): " + Arrays.toString(crossAccountAsgInstances.toArray())); - } else { - logger.info("VPC Account (local ASG): " + Arrays.toString(asgInstances.toArray())); - logger.info("EC2 classic instances (cross-account ASG): " + Arrays.toString(crossAccountAsgInstances.toArray())); - } - } - - // Remove duplicates (probably there are not) - asgInstances.removeAll(crossAccountAsgInstances); - - // Merge the two lists - asgInstances.addAll(crossAccountAsgInstances); - logger.info("Combined Instances in the AZ: {}", asgInstances); - - return asgInstances; - } - - @Override - public PriamInstance get() throws Exception { - - logger.info("Looking for a token from any dead node"); - final List allIds = factory.getAllIds(config.getAppName()); - List asgInstances = membership.getRacMembership(); - if (config.isDualAccount()) { - asgInstances = getDualAccountRacMembership(asgInstances); - } else { - logger.info("Single Account cluster"); - } - - 
// Sleep random interval - upto 15 sec - sleeper.sleep(new Random().nextInt(5000) + 10000); - for (PriamInstance dead : allIds) { - // test same zone and is it is alive. - if (!dead.getRac().equals(config.getRac()) || asgInstances.contains(dead.getInstanceId()) || super.isInstanceDummy(dead)) - continue; - logger.info("Found dead instances: {}", dead.getInstanceId()); - PriamInstance markAsDead = factory.create(dead.getApp() + "-dead", dead.getId(), dead.getInstanceId(), dead.getHostName(), dead.getHostIP(), dead.getRac(), dead.getVolumes(), - dead.getToken()); - // remove it as we marked it down... - factory.delete(dead); - - //find the replaced IP - this.replacedIp = findReplaceIp(allIds, markAsDead.getToken(), markAsDead.getDC()); - if (this.replacedIp == null) - this.replacedIp = markAsDead.getHostIP(); - - String payLoad = markAsDead.getToken(); - logger.info("Trying to grab slot {} with availability zone {}", markAsDead.getId(), markAsDead.getRac()); - return factory.create(config.getAppName(), markAsDead.getId(), config.getInstanceName(), config.getHostname(), config.getHostIP(), config.getRac(), markAsDead.getVolumes(), payLoad); - - } - - return null; - } - - @Override - public String getReplaceIp() { - return this.replacedIp; - } - - private String findReplaceIp(List allIds, String token, String location) { - String ip = null; - for (PriamInstance ins : allIds) { - logger.info("Calling getIp on hostname[{}] and token[{}]", ins.getHostName(), token); - if (ins.getToken().equals(token) || !ins.getDC().equals(location)) { //avoid using dead instance and other regions' instances - continue; - } - - try { - ip = getIp(ins.getHostName(), token); - } catch (ParseException e) { - ip = null; - } - - if (ip != null) { - logger.info("Found the IP: {}", ip); - return ip; - } - } - - return null; - } - - private String getIp(String host, String token) throws ParseException { - ClientConfig config = new DefaultClientConfig(); - Client client = Client.create(config); - String baseURI = getBaseURI(host); - WebResource service = client.resource(baseURI); - - ClientResponse clientResp; - String textEntity = null; - - try { - clientResp = service.path("Priam/REST/v1/cassadmin/gossipinfo").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); - - if (clientResp.getStatus() != 200) - return null; - - textEntity = clientResp.getEntity(String.class); - - logger.info("Respond from calling gossipinfo on host[{}] and token[{}] : {}", host, token, textEntity); - - if (StringUtils.isEmpty(textEntity)) - return null; - } catch (Exception e) { - logger.info("Error in reaching out to host: {}", baseURI); - return null; - } - - JSONParser parser = new JSONParser(); - Object obj = parser.parse(textEntity); - - JSONObject jsonObject = (JSONObject) obj; - - Iterator iter = jsonObject.keySet().iterator(); - - while (iter.hasNext()) { - Object key = iter.next(); - JSONObject msg = (JSONObject) jsonObject.get(key); - if (msg.get("Token") == null) { - continue; - } - String tokenVal = (String) msg.get("Token"); - - if (token.equals(tokenVal)) { - logger.info("Using gossipinfo from host[{}] and token[{}], the replaced address is : {}", host, token, key); - return (String) key; - } - - } - return null; - } - - private String getBaseURI(String host) { - return "http://" + host + ":8080/"; - } - - @Override - public void setLocMap(ListMultimap locMap) { - this.locMap = locMap; - - } - -} \ No newline at end of file diff --git a/priam/src/main/java/com/netflix/priam/identity/token/IDeadTokenRetriever.java 
b/priam/src/main/java/com/netflix/priam/identity/token/IDeadTokenRetriever.java deleted file mode 100755 index 9061f5bfd..000000000 --- a/priam/src/main/java/com/netflix/priam/identity/token/IDeadTokenRetriever.java +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Copyright 2017 Netflix, Inc. - *
- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.netflix.priam.identity.token; - -import com.google.common.collect.ListMultimap; -import com.google.inject.ImplementedBy; -import com.netflix.priam.identity.PriamInstance; - -@ImplementedBy(DeadTokenRetriever.class) -public interface IDeadTokenRetriever { - - PriamInstance get() throws Exception; - - /* - * @return the IP address of the dead instance to which we will acquire its token - */ - String getReplaceIp(); - - /* - * @param A map of the rac for each instance. - */ - void setLocMap(ListMultimap locMap); -} diff --git a/priam/src/main/java/com/netflix/priam/identity/token/INewTokenRetriever.java b/priam/src/main/java/com/netflix/priam/identity/token/INewTokenRetriever.java deleted file mode 100755 index ae3dcf1a9..000000000 --- a/priam/src/main/java/com/netflix/priam/identity/token/INewTokenRetriever.java +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2017 Netflix, Inc. - *
- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.netflix.priam.identity.token; - -import com.google.common.collect.ListMultimap; -import com.google.inject.ImplementedBy; -import com.netflix.priam.identity.PriamInstance; - -@ImplementedBy(NewTokenRetriever.class) -public interface INewTokenRetriever { - - PriamInstance get() throws Exception; - - /* - * @param A map of the rac for each instance. - */ - void setLocMap(ListMultimap locMap); -} diff --git a/priam/src/main/java/com/netflix/priam/identity/token/IPreGeneratedTokenRetriever.java b/priam/src/main/java/com/netflix/priam/identity/token/IPreGeneratedTokenRetriever.java deleted file mode 100755 index a98e2da5c..000000000 --- a/priam/src/main/java/com/netflix/priam/identity/token/IPreGeneratedTokenRetriever.java +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2017 Netflix, Inc. - *
- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.netflix.priam.identity.token; - -import com.google.common.collect.ListMultimap; -import com.google.inject.ImplementedBy; -import com.netflix.priam.identity.PriamInstance; - -@ImplementedBy(PreGeneratedTokenRetriever.class) -public interface IPreGeneratedTokenRetriever { - - PriamInstance get() throws Exception; - - /* - * @param A map of the rac for each instance. - */ - void setLocMap(ListMultimap locMap); -} diff --git a/priam/src/main/java/com/netflix/priam/identity/token/ITokenRetriever.java b/priam/src/main/java/com/netflix/priam/identity/token/ITokenRetriever.java new file mode 100644 index 000000000..b22ff5907 --- /dev/null +++ b/priam/src/main/java/com/netflix/priam/identity/token/ITokenRetriever.java @@ -0,0 +1,20 @@ +package com.netflix.priam.identity.token; + +import com.google.inject.ImplementedBy; +import com.netflix.priam.identity.PriamInstance; +import java.util.Optional; +import org.apache.commons.lang3.math.Fraction; + +/** Fetches PriamInstances and other data which is convenient at the time */ +@ImplementedBy(TokenRetriever.class) +public interface ITokenRetriever { + PriamInstance get() throws Exception; + + /** Gets the IP address of the dead instance whose token we will acquire. */ + Optional getReplacedIp(); + + boolean isTokenPregenerated(); + + /** returns the percentage of tokens in the ring which come before this node's token */ + Fraction getRingPosition() throws Exception; +} diff --git a/priam/src/main/java/com/netflix/priam/identity/token/NewTokenRetriever.java b/priam/src/main/java/com/netflix/priam/identity/token/NewTokenRetriever.java deleted file mode 100755 index 189e8bd19..000000000 --- a/priam/src/main/java/com/netflix/priam/identity/token/NewTokenRetriever.java +++ /dev/null @@ -1,92 +0,0 @@ -/** - * Copyright 2017 Netflix, Inc. - *
- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.netflix.priam.identity.token; - -import com.google.common.collect.ListMultimap; -import com.google.inject.Inject; -import com.netflix.priam.config.IConfiguration; -import com.netflix.priam.identity.IMembership; -import com.netflix.priam.identity.IPriamInstanceFactory; -import com.netflix.priam.identity.PriamInstance; -import com.netflix.priam.utils.ITokenManager; -import com.netflix.priam.utils.Sleeper; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.List; -import java.util.Random; - -public class NewTokenRetriever extends TokenRetrieverBase implements INewTokenRetriever { - - private static final Logger logger = LoggerFactory.getLogger(NewTokenRetriever.class); - private IPriamInstanceFactory factory; - private IMembership membership; - private IConfiguration config; - private Sleeper sleeper; - private ITokenManager tokenManager; - private ListMultimap locMap; - - @Inject - //Note: do not parameterized the generic type variable to an implementation as it confuses Guice in the binding. - public NewTokenRetriever(IPriamInstanceFactory factory, IMembership membership, IConfiguration config, Sleeper sleeper, ITokenManager tokenManager) { - this.factory = factory; - this.membership = membership; - this.config = config; - this.sleeper = sleeper; - this.tokenManager = tokenManager; - } - - @Override - public PriamInstance get() throws Exception { - - logger.info("Generating my own and new token"); - // Sleep random interval - upto 15 sec - sleeper.sleep(new Random().nextInt(15000)); - int hash = tokenManager.regionOffset(config.getDC()); - // use this hash so that the nodes are spred far away from the other - // regions. - - int max = hash; - List allInstances = factory.getAllIds(config.getAppName()); - for (PriamInstance data : allInstances) - max = (data.getRac().equals(config.getRac()) && (data.getId() > max)) ? data.getId() : max; - int maxSlot = max - hash; - int my_slot = 0; - - if (hash == max && locMap.get(config.getRac()).size() == 0) { - int idx = config.getRacs().indexOf(config.getRac()); - if (idx < 0) - throw new Exception(String.format("Rac %s is not in Racs %s", config.getRac(), config.getRacs())); - my_slot = idx + maxSlot; - } else - my_slot = config.getRacs().size() + maxSlot; - - logger.info("Trying to createToken with slot {} with rac count {} with rac membership size {} with dc {}", - my_slot, membership.getRacCount(), membership.getRacMembershipSize(), config.getDC()); - String payload = tokenManager.createToken(my_slot, membership.getRacCount(), membership.getRacMembershipSize(), config.getDC()); - return factory.create(config.getAppName(), my_slot + hash, config.getInstanceName(), config.getHostname(), config.getHostIP(), config.getRac(), null, payload); - - } - - /* - * @param A map of the rac for each instance. 
- */ - @Override - public void setLocMap(ListMultimap locMap) { - this.locMap = locMap; - } - -} diff --git a/priam/src/main/java/com/netflix/priam/identity/token/PreGeneratedTokenRetriever.java b/priam/src/main/java/com/netflix/priam/identity/token/PreGeneratedTokenRetriever.java deleted file mode 100755 index 98f32ad78..000000000 --- a/priam/src/main/java/com/netflix/priam/identity/token/PreGeneratedTokenRetriever.java +++ /dev/null @@ -1,80 +0,0 @@ -/** - * Copyright 2017 Netflix, Inc. - *
- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.netflix.priam.identity.token; - -import com.google.common.collect.ListMultimap; -import com.google.inject.Inject; -import com.netflix.priam.config.IConfiguration; -import com.netflix.priam.identity.IMembership; -import com.netflix.priam.identity.IPriamInstanceFactory; -import com.netflix.priam.identity.PriamInstance; -import com.netflix.priam.utils.Sleeper; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.List; -import java.util.Random; - -public class PreGeneratedTokenRetriever extends TokenRetrieverBase implements IPreGeneratedTokenRetriever { - - private static final Logger logger = LoggerFactory.getLogger(PreGeneratedTokenRetriever.class); - private IPriamInstanceFactory factory; - private IMembership membership; - private IConfiguration config; - private Sleeper sleeper; - private ListMultimap locMap; - - @Inject - public PreGeneratedTokenRetriever(IPriamInstanceFactory factory, IMembership membership, IConfiguration config, Sleeper sleeper) { - this.factory = factory; - this.membership = membership; - this.config = config; - this.sleeper = sleeper; - } - - @Override - public PriamInstance get() throws Exception { - logger.info("Looking for any pre-generated token"); - - final List allIds = factory.getAllIds(config.getAppName()); - List asgInstances = membership.getRacMembership(); - // Sleep random interval - upto 15 sec - sleeper.sleep(new Random().nextInt(5000) + 10000); - for (PriamInstance dead : allIds) { - // test same zone and is it is alive. - if (!dead.getRac().equals(config.getRac()) || asgInstances.contains(dead.getInstanceId()) || !isInstanceDummy(dead)) - continue; - logger.info("Found pre-generated token: {}", dead.getToken()); - PriamInstance markAsDead = factory.create(dead.getApp() + "-dead", dead.getId(), dead.getInstanceId(), dead.getHostName(), dead.getHostIP(), dead.getRac(), dead.getVolumes(), - dead.getToken()); - // remove it as we marked it down... 
- factory.delete(dead); - - - String payLoad = markAsDead.getToken(); - logger.info("Trying to grab slot {} with availability zone {}", markAsDead.getId(), markAsDead.getRac()); - return factory.create(config.getAppName(), markAsDead.getId(), config.getInstanceName(), config.getHostname(), config.getHostIP(), config.getRac(), markAsDead.getVolumes(), payLoad); - } - return null; - } - - @Override - public void setLocMap(ListMultimap locMap) { - this.locMap = locMap; - - } - -} diff --git a/priam/src/main/java/com/netflix/priam/identity/token/TokenRetriever.java b/priam/src/main/java/com/netflix/priam/identity/token/TokenRetriever.java new file mode 100644 index 000000000..8ad3e02ae --- /dev/null +++ b/priam/src/main/java/com/netflix/priam/identity/token/TokenRetriever.java @@ -0,0 +1,336 @@ +package com.netflix.priam.identity.token; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Sets; +import com.netflix.priam.config.IConfiguration; +import com.netflix.priam.identity.IMembership; +import com.netflix.priam.identity.IPriamInstanceFactory; +import com.netflix.priam.identity.InstanceIdentity; +import com.netflix.priam.identity.PriamInstance; +import com.netflix.priam.identity.config.InstanceInfo; +import com.netflix.priam.utils.ITokenManager; +import com.netflix.priam.utils.RetryableCallable; +import com.netflix.priam.utils.Sleeper; +import java.math.BigInteger; +import java.util.List; +import java.util.Optional; +import java.util.Random; +import java.util.Set; +import java.util.stream.Collectors; +import javax.inject.Inject; +import org.apache.commons.lang3.math.Fraction; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class TokenRetriever implements ITokenRetriever { + + public static final String NEW_SLOT = "new_slot"; + private static final int MAX_VALUE_IN_MILISECS = 300000; // sleep up to 5 minutes + private static final Logger logger = LoggerFactory.getLogger(InstanceIdentity.class); + + private final Random randomizer; + private final Sleeper sleeper; + private final IPriamInstanceFactory factory; + private final IMembership membership; + private final IConfiguration config; + private final ITokenManager tokenManager; + + // Instance information contains other information like ASG/vpc-id etc. 
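+    // Illustrative note: via @ImplementedBy this resolves to AWSInstanceInfo (backed by EC2 metadata) in production, while LocalInstanceInfo reads system properties for locally-deployed testing.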
+ private InstanceInfo myInstanceInfo; + private boolean isTokenPregenerated = false; + private String replacedIp; + private PriamInstance priamInstance; + + @Inject + public TokenRetriever( + IPriamInstanceFactory factory, + IMembership membership, + IConfiguration config, + InstanceInfo instanceInfo, + Sleeper sleeper, + ITokenManager tokenManager) { + this.factory = factory; + this.membership = membership; + this.config = config; + this.myInstanceInfo = instanceInfo; + this.randomizer = new Random(); + this.sleeper = sleeper; + this.tokenManager = tokenManager; + } + + @Override + public PriamInstance get() throws Exception { + if (priamInstance == null) { + priamInstance = grabPreAssignedToken(); + } + if (priamInstance == null) { + priamInstance = grabExistingToken(); + } + if (priamInstance == null) { + priamInstance = grabNewToken(); + } + logger.info("My instance: {}", priamInstance); + return priamInstance; + } + + @Override + public Optional getReplacedIp() { + return Optional.ofNullable(replacedIp); + } + + @Override + public boolean isTokenPregenerated() { + return isTokenPregenerated; + } + + @Override + public Fraction getRingPosition() throws Exception { + get(); + BigInteger token = new BigInteger(priamInstance.getToken()); + ImmutableSet nodes = factory.getAllIds(config.getAppName()); + long ringPosition = + nodes.stream() + .filter(node -> token.compareTo(new BigInteger(node.getToken())) > 0) + .count(); + return Fraction.getFraction(Math.toIntExact(ringPosition), nodes.size()); + } + + private PriamInstance grabPreAssignedToken() throws Exception { + return new RetryableCallable() { + @Override + public PriamInstance retriableCall() throws Exception { + logger.info("Trying to grab a pre-assigned token."); + // Check if this node is decommissioned. + ImmutableSet allIds = + factory.getAllIds(config.getAppName() + "-dead"); + Optional instance = + findInstance(allIds).map(PriamInstance::setOutOfService); + if (!instance.isPresent()) { + ImmutableSet liveNodes = factory.getAllIds(config.getAppName()); + instance = instance.map(Optional::of).orElseGet(() -> findInstance(liveNodes)); + if (instance.isPresent()) { + // Why check gossip? Priam might have crashed before bootstrapping + // Cassandra in replace mode. 
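+                        // e.g. a prior incarnation claimed this token, then died before Cassandra finished bootstrapping; gossip may still report the old owner's IP, in which case Cassandra must start in replace mode (Cassandra's replace_address startup option) rather than as a brand-new node.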
+ replacedIp = getReplacedIpForAssignedToken(liveNodes, instance.get()); + } + } + return instance.map(i -> claimToken(i)).orElse(null); + } + }.call(); + } + + @VisibleForTesting + public PriamInstance grabExistingToken() throws Exception { + return new RetryableCallable() { + @Override + public PriamInstance retriableCall() throws Exception { + logger.info("Trying to grab an existing token"); + sleeper.sleep(new Random().nextInt(5000) + 10000); + Set racInstanceIds = getRacInstanceIds(); + ImmutableSet allIds = factory.getAllIds(config.getAppName()); + List instances = + allIds.stream() + .filter(i -> i.getRac().equals(myInstanceInfo.getRac())) + .filter(i -> !racInstanceIds.contains(i.getInstanceId())) + .collect(Collectors.toList()); + Optional candidate = + instances.stream().filter(i -> !isNew(i)).findFirst(); + candidate.ifPresent(i -> replacedIp = getReplacedIpForExistingToken(allIds, i)); + if (replacedIp == null) { + candidate = instances.stream().filter(i -> isNew(i)).findFirst(); + candidate.ifPresent(i -> isTokenPregenerated = true); + } + return candidate.map(i -> claimToken(i)).orElse(null); + } + }.call(); + } + + private PriamInstance grabNewToken() throws Exception { + Preconditions.checkState(config.isCreateNewTokenEnable()); + return new RetryableCallable() { + @Override + public PriamInstance retriableCall() throws Exception { + set(100, 100); + logger.info("Trying to generate a new token"); + sleeper.sleep(new Random().nextInt(15000)); + String myRegion = myInstanceInfo.getRegion(); + // this offset ensures the nodes are spread far away from the other regions. + int regionOffset = tokenManager.regionOffset(myRegion); + String myRac = myInstanceInfo.getRac(); + List racs = config.getRacs(); + int mySlot = + factory.getAllIds(config.getAppName()) + .stream() + .filter(i -> i.getRac().equals(myRac)) + .map(PriamInstance::getId) + .max(Integer::compareTo) + .map(id -> racs.size() + Math.max(id, regionOffset) - regionOffset) + .orElseGet( + () -> { + Preconditions.checkState(racs.contains(myRac)); + return racs.indexOf(myRac); + }); + int instanceCount = membership.getRacCount() * membership.getRacMembershipSize(); + String newToken = tokenManager.createToken(mySlot, instanceCount, myRegion); + return createToken(mySlot + regionOffset, newToken); + } + }.call(); + } + + private String getReplacedIpForAssignedToken( + ImmutableSet aliveInstances, PriamInstance instance) + throws TokenRetrieverUtils.GossipParseException { + // Infer current ownership information from other instances using gossip. + TokenRetrieverUtils.InferredTokenOwnership inferredTokenOwnership = + TokenRetrieverUtils.inferTokenOwnerFromGossip( + aliveInstances, instance.getToken(), instance.getDC()); + // if unreachable rely on token database. + // if mismatch rely on token database. + String ipToReplace = null; + if (inferredTokenOwnership.getTokenInformationStatus() + == TokenRetrieverUtils.InferredTokenOwnership.TokenInformationStatus.GOOD) { + Preconditions.checkNotNull(inferredTokenOwnership.getTokenInformation()); + String inferredIp = inferredTokenOwnership.getTokenInformation().getIpAddress(); + if (!inferredIp.equals(myInstanceInfo.getHostIP()) + && !inferredIp.equals(myInstanceInfo.getPrivateIP())) { + if (inferredTokenOwnership.getTokenInformation().isLive()) { + throw new TokenRetrieverUtils.GossipParseException( + "We have been assigned a token that C* thinks is alive. 
Throwing to buy time in the hopes that Gossip just needs to settle."); + } + ipToReplace = inferredIp; + logger.info( + "Priam found that the token is not alive according to Cassandra and we should start Cassandra in replace mode with replace ip: " + + inferredIp); + } + } else if (inferredTokenOwnership.getTokenInformationStatus() + == TokenRetrieverUtils.InferredTokenOwnership.TokenInformationStatus + .MISMATCH + && !config.permitDirectTokenAssignmentWithGossipMismatch()) { + throw new TokenRetrieverUtils.GossipParseException( + "We saw inconsistent results from gossip. Throwing to buy time for it to settle."); + } + return ipToReplace; + } + + private String getReplacedIpForExistingToken( + ImmutableSet allInstancesWithinCluster, PriamInstance priamInstance) { + + // Infer current ownership information from other instances using gossip. + TokenRetrieverUtils.InferredTokenOwnership inferredTokenInformation = + TokenRetrieverUtils.inferTokenOwnerFromGossip( + allInstancesWithinCluster, priamInstance.getToken(), priamInstance.getDC()); + + switch (inferredTokenInformation.getTokenInformationStatus()) { + case GOOD: + if (inferredTokenInformation.getTokenInformation() == null) { + logger.error( + "If you see this message, it should not have happened. We expect token ownership information if all nodes agree. This is a code bounty issue."); + return null; + } + // Everyone agreed to a value. Check if it is live node. + if (inferredTokenInformation.getTokenInformation().isLive()) { + logger.info( + "This token is considered alive unanimously! We will not replace this instance."); + return null; + } else { + String ip = inferredTokenInformation.getTokenInformation().getIpAddress(); + logger.info("Will try to replace token owned by {}", ip); + return ip; + } + case UNREACHABLE_NODES: + // In case of unable to reach sufficient nodes, fallback to IP in token + // database. This could be a genuine case of say missing security + // permissions. + logger.warn( + "Unable to reach sufficient nodes. Please check security group permissions or there might be a network partition."); + logger.info( + "Will try to replace token: {} with replacedIp from Token database: {}", + priamInstance.getToken(), + priamInstance.getHostIP()); + return priamInstance.getHostIP(); + case MISMATCH: + // Lets not replace the instance if gossip info is not merging!! + logger.info( + "Mismatch in gossip. We will not replace this instance, until gossip settles down."); + return null; + default: + throw new IllegalStateException( + "Unexpected value: " + + inferredTokenInformation.getTokenInformationStatus()); + } + } + + private PriamInstance claimToken(PriamInstance originalInstance) { + String hostIP = + config.usePrivateIP() ? 
myInstanceInfo.getPrivateIP() : myInstanceInfo.getHostIP(); + if (originalInstance.getInstanceId().equals(myInstanceInfo.getInstanceId()) + && originalInstance.getHostName().equals(myInstanceInfo.getHostname()) + && originalInstance.getHostIP().equals(hostIP) + && originalInstance.getRac().equals(myInstanceInfo.getRac())) { + return originalInstance; + } + PriamInstance newInstance = new PriamInstance(); + newInstance.setApp(config.getAppName()); + newInstance.setId(originalInstance.getId()); + newInstance.setInstanceId(myInstanceInfo.getInstanceId()); + newInstance.setHost(myInstanceInfo.getHostname()); + newInstance.setHostIP(hostIP); + newInstance.setRac(myInstanceInfo.getRac()); + newInstance.setVolumes(originalInstance.getVolumes()); + newInstance.setToken(originalInstance.getToken()); + newInstance.setDC(originalInstance.getDC()); + try { + factory.update(originalInstance, newInstance); + } catch (Exception ex) { + long sleepTime = randomizer.nextInt(MAX_VALUE_IN_MILISECS); + String token = newInstance.getToken(); + logger.warn("Failed updating token: {}; sleeping {} millis", token, sleepTime); + sleeper.sleepQuietly(sleepTime); + throw ex; + } + return newInstance; + } + + private PriamInstance createToken(int id, String token) { + try { + String hostIp = + config.usePrivateIP() + ? myInstanceInfo.getPrivateIP() + : myInstanceInfo.getHostIP(); + return factory.create( + config.getAppName(), + id, + myInstanceInfo.getInstanceId(), + myInstanceInfo.getHostname(), + hostIp, + myInstanceInfo.getRac(), + null /* volumes */, + token); + } catch (Exception ex) { + long sleepTime = randomizer.nextInt(MAX_VALUE_IN_MILISECS); + logger.warn("Failed updating token: {}; sleeping {} millis", token, sleepTime); + sleeper.sleepQuietly(sleepTime); + throw ex; + } + } + + private Optional findInstance(ImmutableSet instances) { + return instances + .stream() + .filter((i) -> i.getInstanceId().equals(myInstanceInfo.getInstanceId())) + .findFirst(); + } + + private Set getRacInstanceIds() { // TODO(CASS-1986) + ImmutableSet racMembership = membership.getRacMembership(); + return config.isDualAccount() + ? Sets.union(membership.getCrossAccountRacMembership(), racMembership) + : racMembership; + } + + private boolean isNew(PriamInstance instance) { + return instance.getInstanceId().equals(NEW_SLOT); + } +} diff --git a/priam/src/main/java/com/netflix/priam/identity/token/TokenRetrieverBase.java b/priam/src/main/java/com/netflix/priam/identity/token/TokenRetrieverBase.java deleted file mode 100755 index 1153cbbef..000000000 --- a/priam/src/main/java/com/netflix/priam/identity/token/TokenRetrieverBase.java +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Copyright 2017 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.netflix.priam.identity.token; - -import com.netflix.priam.identity.PriamInstance; - -import java.util.Random; - -public class TokenRetrieverBase { - - public static final String DUMMY_INSTANCE_ID = "new_slot"; - private static final int MAX_VALUE_IN_MILISECS = 300000; //sleep up to 5 minutes - protected Random randomizer; - - public TokenRetrieverBase() { - this.randomizer = new Random(); - } - - protected boolean isInstanceDummy(PriamInstance instance) { - return instance.getInstanceId().equals(DUMMY_INSTANCE_ID); - } - - /* - * Return a random time for a thread to sleep. - * - * @return time in millisecs - */ - protected long getSleepTime() { - return (long) this.randomizer.nextInt(MAX_VALUE_IN_MILISECS); - } -} diff --git a/priam/src/main/java/com/netflix/priam/identity/token/TokenRetrieverUtils.java b/priam/src/main/java/com/netflix/priam/identity/token/TokenRetrieverUtils.java new file mode 100644 index 000000000..5827e01ae --- /dev/null +++ b/priam/src/main/java/com/netflix/priam/identity/token/TokenRetrieverUtils.java @@ -0,0 +1,218 @@ +package com.netflix.priam.identity.token; + +import com.google.common.collect.ImmutableSet; +import com.netflix.priam.identity.PriamInstance; +import com.netflix.priam.utils.GsonJsonSerializer; +import com.netflix.priam.utils.SystemUtils; +import java.util.Collections; +import java.util.List; +import java.util.stream.Collectors; +import org.json.simple.JSONArray; +import org.json.simple.JSONObject; +import org.json.simple.parser.JSONParser; +import org.json.simple.parser.ParseException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** Common utilities for token retrieval. */ +public class TokenRetrieverUtils { + private static final Logger logger = LoggerFactory.getLogger(TokenRetrieverUtils.class); + private static final String STATUS_URL_FORMAT = "http://%s:8080/Priam/REST/v1/cassadmin/status"; + + /** + * Utility method to infer the IP of the owner of a token in a given datacenter. This method + * uses Cassandra status information to find the owner. While it is ideal to check all the nodes + * in the ring to see if they agree on the IP to be replaced, in large clusters it may affect + * the startup performance. This method picks at most 3 random hosts from the ring and sees if + * they all agree on the IP to be replaced. If not, it returns null. + * + * @param allIds all instances within the cluster + * @param token the token whose owner we are trying to infer + * @param dc the datacenter in which to search + * @return IP of the token owner based on gossip information or null if C* status doesn't + * converge. + */ + public static InferredTokenOwnership inferTokenOwnerFromGossip( + ImmutableSet<PriamInstance> allIds, String token, String dc) { + + // Avoid using the dead instance that we are trying to replace (duh!!) + // Avoid instances in other regions to avoid communication over public IP addresses. + List<PriamInstance> eligibleInstances = + allIds.stream() + .filter(priamInstance -> !priamInstance.getToken().equalsIgnoreCase(token)) + .filter(priamInstance -> priamInstance.getDC().equalsIgnoreCase(dc)) + .collect(Collectors.toList()); + // We want to get the IP from a minimum of 1 and a maximum of 3 instances to ensure we are + // not relying on the gossip of a single instance. + // It is a good idea to shuffle so we are not talking to the same instances every time.
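+            // For illustration (hypothetical sizes, not part of this change): with 10 eligible + // instances the quorum below still requires only 3 matching gossip responses, while + // with 2 eligible instances it requires both, since it is max(1, min(3, size)).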
+ Collections.shuffle(eligibleInstances); + // A potential issue arises when about 50% of the Cassandra processes in your cluster + // are DOWN or being replaced. + // Think of a major disaster hitting your cluster. In that scenario the chances of + // hitting a DOWN C* instance are much higher. + // In such a case you should rely on {@link CassandraConfig#setReplacedIp}. + int noOfInstancesGossipShouldMatch = Math.max(1, Math.min(3, eligibleInstances.size())); + + // While it is ideal to check all the nodes in the ring to see if they agree on + // the IP to be replaced, in large clusters it may affect the startup + // performance. So we pick three random hosts from the ring and see if they all + // agree on the IP to be replaced. If not, we don't replace. + InferredTokenOwnership inferredTokenOwnership = new InferredTokenOwnership(); + int matchedGossipInstances = 0, reachableInstances = 0; + for (PriamInstance instance : eligibleInstances) { + logger.info("Finding down nodes from ip[{}]; token[{}]", instance.getHostIP(), token); + + try { + TokenInformation tokenInformation = + getTokenInformation(instance.getHostIP(), token); + reachableInstances++; + + if (inferredTokenOwnership.getTokenInformation() == null) { + inferredTokenOwnership.setTokenInformation(tokenInformation); + } + + if (inferredTokenOwnership.getTokenInformation().equals(tokenInformation)) { + matchedGossipInstances++; + if (matchedGossipInstances == noOfInstancesGossipShouldMatch) { + inferredTokenOwnership.setTokenInformationStatus( + InferredTokenOwnership.TokenInformationStatus.GOOD); + return inferredTokenOwnership; + } + } else { + // Mismatch in the gossip information from Cassandra. + inferredTokenOwnership.setTokenInformationStatus( + InferredTokenOwnership.TokenInformationStatus.MISMATCH); + logger.info( + "There is a mismatch in the status information reported by Cassandra. TokenInformation1: {}, TokenInformation2: {}", + inferredTokenOwnership.getTokenInformation(), + tokenInformation); + inferredTokenOwnership.setTokenInformation( + inferredTokenOwnership.getTokenInformation().isLive + ? inferredTokenOwnership.getTokenInformation() + : tokenInformation); + return inferredTokenOwnership; + } + + } catch (GossipParseException e) { + logger.warn(e.getMessage()); + } + } + + // If we were not able to reach at least the minimum required number of instances. + if (reachableInstances < noOfInstancesGossipShouldMatch) { + inferredTokenOwnership.setTokenInformationStatus( + InferredTokenOwnership.TokenInformationStatus.UNREACHABLE_NODES); + logger.info( + String.format( + "Unable to find enough instances whose gossip matches. Required: [%d]", + noOfInstancesGossipShouldMatch)); + } + + return inferredTokenOwnership; + } + + // Helper method to get the token owner IP from a Cassandra node. + private static TokenInformation getTokenInformation(String ip, String token) + throws GossipParseException { + String response = null; + try { + response = SystemUtils.getDataFromUrl(String.format(STATUS_URL_FORMAT, ip)); + JSONObject jsonObject = (JSONObject) new JSONParser().parse(response); + JSONArray liveNodes = (JSONArray) jsonObject.get("live"); + JSONObject tokenToEndpointMap = (JSONObject) jsonObject.get("tokenToEndpointMap"); + String endpointInfo = tokenToEndpointMap.get(token).toString(); + // We intentionally do not use the "unreachable" nodes as they may or may not be the + // best place to start. + // We just verify that the endpoint we provide is not "live".
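+            // For reference, the status endpoint above is expected to return JSON of roughly + // this shape (illustrative, hypothetical values): + // {"live": ["10.0.0.1", "10.0.0.2"], "tokenToEndpointMap": {"100200300": "10.0.0.3"}} + // Only the "live" array and the "tokenToEndpointMap" object are consumed here.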
+ boolean isLive = liveNodes.contains(endpointInfo); + return new TokenInformation(endpointInfo, isLive); + } catch (RuntimeException e) { + throw new GossipParseException( + String.format("Error in reaching out to host: [%s]", ip), e); + } catch (ParseException e) { + throw new GossipParseException( + String.format( + "Error in parsing gossip response [%s] from host: [%s]", response, ip), + e); + } + } + + public static class TokenInformation { + private String ipAddress; + private boolean isLive; + + public TokenInformation(String ipAddress, boolean isLive) { + this.ipAddress = ipAddress; + this.isLive = isLive; + } + + public boolean isLive() { + return isLive; + } + + public String getIpAddress() { + return ipAddress; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null || this.getClass() != obj.getClass()) return false; + TokenInformation tokenInformation = (TokenInformation) obj; + return this.ipAddress.equalsIgnoreCase(tokenInformation.getIpAddress()) + && isLive == tokenInformation.isLive; + } + + @Override + public String toString() { + return GsonJsonSerializer.getGson().toJson(this); + } + } + + public static class InferredTokenOwnership { + public enum TokenInformationStatus { + GOOD, + UNREACHABLE_NODES, + MISMATCH + } + + private TokenInformationStatus tokenInformationStatus = + TokenInformationStatus.UNREACHABLE_NODES; + private TokenInformation tokenInformation; + + public void setTokenInformationStatus(TokenInformationStatus tokenInformationStatus) { + this.tokenInformationStatus = tokenInformationStatus; + } + + public void setTokenInformation(TokenInformation tokenInformation) { + this.tokenInformation = tokenInformation; + } + + public TokenInformationStatus getTokenInformationStatus() { + return tokenInformationStatus; + } + + public TokenInformation getTokenInformation() { + return tokenInformation; + } + } + + /** + * This exception is thrown either when instances are not available or when they return an + * invalid response. + */ + public static class GossipParseException extends Exception { + private static final long serialVersionUID = 1462488371031437486L; + + public GossipParseException() { + super(); + } + + public GossipParseException(String message) { + super(message); + } + + public GossipParseException(String message, Throwable t) { + super(message, t); + } + } +} diff --git a/priam/src/main/java/com/netflix/priam/merics/BackupMetrics.java b/priam/src/main/java/com/netflix/priam/merics/BackupMetrics.java index 8b12b53aa..e9b5654a3 100644 --- a/priam/src/main/java/com/netflix/priam/merics/BackupMetrics.java +++ b/priam/src/main/java/com/netflix/priam/merics/BackupMetrics.java @@ -1,16 +1,14 @@ /** * Copyright 2017 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and + * + *

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + *

http://www.apache.org/licenses/LICENSE-2.0 + * + *

 Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.priam.merics; @@ -19,65 +17,75 @@ import com.google.inject.Singleton; import com.netflix.spectator.api.Counter; import com.netflix.spectator.api.DistributionSummary; -import com.netflix.spectator.api.Gauge; import com.netflix.spectator.api.Registry; -/** - * Created by vinhn on 2/13/17. - */ +/** Created by vinhn on 2/13/17. */ @Singleton public class BackupMetrics { + private final Registry registry; /** - * Distribution summary will provide the metric like count (how many uploads were made), max no. of bytes uploaded and total amount of bytes uploaded. + * Distribution summary will provide metrics like count (how many uploads were made), max no. + * of bytes uploaded, and total amount of bytes uploaded. */ - private final DistributionSummary uploadRate; - private final Counter validUploads, invalidUploads, validDownloads, invalidDownloads, awsSlowDownException, snsNotificationSuccess, snsNotificationFailure; + private final DistributionSummary uploadRate, downloadRate; + + private final Counter validUploads, + validDownloads, + invalidUploads, + invalidDownloads, + snsNotificationSuccess, + snsNotificationFailure, + forgottenFiles, + backupVerificationFailure; + public static final String uploadQueueSize = Metrics.METRIC_PREFIX + "upload.queue.size"; + public static final String downloadQueueSize = Metrics.METRIC_PREFIX + "download.queue.size"; @Inject public BackupMetrics(Registry registry) { + this.registry = registry; validDownloads = registry.counter(Metrics.METRIC_PREFIX + "download.valid"); invalidDownloads = registry.counter(Metrics.METRIC_PREFIX + "download.invalid"); validUploads = registry.counter(Metrics.METRIC_PREFIX + "upload.valid"); invalidUploads = registry.counter(Metrics.METRIC_PREFIX + "upload.invalid"); uploadRate = registry.distributionSummary(Metrics.METRIC_PREFIX + "upload.rate"); - awsSlowDownException = registry.counter(Metrics.METRIC_PREFIX + "aws.slowDown"); - snsNotificationSuccess = registry.counter(Metrics.METRIC_PREFIX + "sns.notification.success"); - snsNotificationFailure = registry.counter(Metrics.METRIC_PREFIX + "sns.notification.failure"); + downloadRate = registry.distributionSummary(Metrics.METRIC_PREFIX + "download.rate"); + snsNotificationSuccess = + registry.counter(Metrics.METRIC_PREFIX + "sns.notification.success"); + snsNotificationFailure = + registry.counter(Metrics.METRIC_PREFIX + "sns.notification.failure"); + forgottenFiles = registry.counter(Metrics.METRIC_PREFIX + "forgotten.files"); + backupVerificationFailure = + registry.counter(Metrics.METRIC_PREFIX + "backup.verification.failure"); } - public void incrementValidUploads() { - this.validUploads.increment(); + public DistributionSummary getUploadRate() { + return uploadRate; } - public void incrementInvalidUploads() { - this.invalidUploads.increment(); + public Counter getInvalidUploads() { + return invalidUploads; } - public long getValidUploads() { - return this.validUploads.count(); + public Counter getInvalidDownloads() { + return invalidDownloads; } - public long getValidDownloads() { - return this.validDownloads.count(); + public Counter getSnsNotificationSuccess() { + return snsNotificationSuccess; } - public void incrementValidDownloads() { - 
this.invalidDownloads.increment(); + public Counter getSnsNotificationFailure() { + return snsNotificationFailure; } + public void incrementInvalidUploads() { + this.invalidUploads.increment(); + } public void incrementInvalidDownloads() { this.invalidDownloads.increment(); } - public long getAwsSlowDownException() { - return awsSlowDownException.count(); - } - - public void incrementAwsSlowDownException(int awsSlowDown) { - awsSlowDownException.increment(awsSlowDown); - } - public void incrementSnsNotificationSuccess() { snsNotificationSuccess.increment(); } @@ -86,8 +94,43 @@ public void incrementSnsNotificationFailure() { snsNotificationFailure.increment(); } + public void incrementBackupVerificationFailure() { + backupVerificationFailure.increment(); + } + public void recordUploadRate(long sizeInBytes) { uploadRate.record(sizeInBytes); } + public void incrementForgottenFiles(long forgottenFilesVal) { + forgottenFiles.increment(forgottenFilesVal); + } + + public void recordDownloadRate(long sizeInBytes) { + downloadRate.record(sizeInBytes); + } + + public DistributionSummary getDownloadRate() { + return downloadRate; + } + + public Counter getValidUploads() { + return validUploads; + } + + public Counter getValidDownloads() { + return validDownloads; + } + + public void incrementValidUploads() { + this.validUploads.increment(); + } + + public void incrementValidDownloads() { + this.validDownloads.increment(); + } + + public Registry getRegistry() { + return registry; + } } diff --git a/priam/src/main/java/com/netflix/priam/merics/CassMonitorMetrics.java b/priam/src/main/java/com/netflix/priam/merics/CassMonitorMetrics.java index 5cafa5c12..919789689 100644 --- a/priam/src/main/java/com/netflix/priam/merics/CassMonitorMetrics.java +++ b/priam/src/main/java/com/netflix/priam/merics/CassMonitorMetrics.java @@ -1,16 +1,14 @@ /** * Copyright 2018 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and + * + *

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + *

http://www.apache.org/licenses/LICENSE-2.0 + * + *

Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.priam.merics; @@ -19,19 +17,38 @@ import com.google.inject.Singleton; import com.netflix.spectator.api.Gauge; import com.netflix.spectator.api.Registry; +import com.netflix.spectator.api.patterns.PolledMeter; +import java.util.concurrent.atomic.AtomicLong; -/** - * @author vchella - */ +/** @author vchella */ @Singleton public class CassMonitorMetrics { private final Gauge cassStop, cassAutoStart, cassStart; + private final AtomicLong getSeedsCnt, getTokenCnt, getReplacedIpCnt, doubleRingCnt; @Inject public CassMonitorMetrics(Registry registry) { cassStop = registry.gauge(Metrics.METRIC_PREFIX + "cass.stop"); cassStart = registry.gauge(Metrics.METRIC_PREFIX + "cass.start"); cassAutoStart = registry.gauge(Metrics.METRIC_PREFIX + "cass.auto.start"); + + getSeedsCnt = + PolledMeter.using(registry) + .withName(Metrics.METRIC_PREFIX + "cass.getSeedCnt") + .monitorMonotonicCounter(new AtomicLong(0)); + getTokenCnt = + PolledMeter.using(registry) + .withName(Metrics.METRIC_PREFIX + "cass.getTokenCnt") + .monitorMonotonicCounter(new AtomicLong(0)); + getReplacedIpCnt = + PolledMeter.using(registry) + .withName(Metrics.METRIC_PREFIX + "cass.getReplacedIpCnt") + .monitorMonotonicCounter(new AtomicLong(0)); + + doubleRingCnt = + PolledMeter.using(registry) + .withName(Metrics.METRIC_PREFIX + "cass.doubleRingCnt") + .monitorMonotonicCounter(new AtomicLong(0)); } public void incCassStop() { @@ -46,4 +63,19 @@ public void incCassStart() { cassStart.set(cassStart.value() + 1); } + public void incGetSeeds() { + getSeedsCnt.incrementAndGet(); + } + + public void incGetToken() { + getTokenCnt.incrementAndGet(); + } + + public void incGetReplacedIp() { + getReplacedIpCnt.incrementAndGet(); + } + + public void incDoubleRing() { + doubleRingCnt.incrementAndGet(); + } } diff --git a/priam/src/main/java/com/netflix/priam/merics/CompactionMeasurement.java b/priam/src/main/java/com/netflix/priam/merics/CompactionMeasurement.java index fbd1dad1e..b3ff3a1cf 100644 --- a/priam/src/main/java/com/netflix/priam/merics/CompactionMeasurement.java +++ b/priam/src/main/java/com/netflix/priam/merics/CompactionMeasurement.java @@ -18,14 +18,10 @@ import com.netflix.spectator.api.Counter; import com.netflix.spectator.api.Registry; - import javax.inject.Inject; import javax.inject.Singleton; -/** - * Measurement class for scheduled compactions - * Created by aagrawal on 2/28/18. - */ +/** Measurement class for scheduled compactions Created by aagrawal on 2/28/18. */ @Singleton public class CompactionMeasurement implements IMeasurement { private final Counter failure, success; @@ -36,7 +32,6 @@ public CompactionMeasurement(Registry registry) { success = registry.counter(Metrics.METRIC_PREFIX + "compaction.success"); } - public void incrementFailure() { this.failure.increment(); } diff --git a/priam/src/main/java/com/netflix/priam/merics/IMeasurement.java b/priam/src/main/java/com/netflix/priam/merics/IMeasurement.java index 0a83d356a..e00a5e95d 100644 --- a/priam/src/main/java/com/netflix/priam/merics/IMeasurement.java +++ b/priam/src/main/java/com/netflix/priam/merics/IMeasurement.java @@ -1,25 +1,22 @@ /** * Copyright 2017 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and + * + *

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + *

http://www.apache.org/licenses/LICENSE-2.0 + * + *

Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.priam.merics; /** - * * Represents a specific measurement for publishing to a metric system * - * Created by vinhn on 10/14/16. + *

Created by vinhn on 10/14/16. */ public interface IMeasurement { void incrementFailure(); diff --git a/priam/src/main/java/com/netflix/priam/merics/Metrics.java b/priam/src/main/java/com/netflix/priam/merics/Metrics.java index 1c9f1168c..41cb61867 100644 --- a/priam/src/main/java/com/netflix/priam/merics/Metrics.java +++ b/priam/src/main/java/com/netflix/priam/merics/Metrics.java @@ -17,9 +17,7 @@ package com.netflix.priam.merics; -/** - * Created by aagrawal on 8/15/18. - */ +/** Created by aagrawal on 8/15/18. */ public interface Metrics { String METRIC_PREFIX = "priam."; } diff --git a/priam/src/main/java/com/netflix/priam/merics/NodeToolFlushMeasurement.java b/priam/src/main/java/com/netflix/priam/merics/NodeToolFlushMeasurement.java index 3670feb5f..89b16a745 100644 --- a/priam/src/main/java/com/netflix/priam/merics/NodeToolFlushMeasurement.java +++ b/priam/src/main/java/com/netflix/priam/merics/NodeToolFlushMeasurement.java @@ -1,31 +1,27 @@ /** * Copyright 2017 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and + * + *

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + *

http://www.apache.org/licenses/LICENSE-2.0 + * + *

 Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.priam.merics; import com.netflix.spectator.api.Counter; import com.netflix.spectator.api.Registry; - import javax.inject.Inject; import javax.inject.Singleton; /** - * * Represents the value to be published to a telemetry endpoint * - * Created by vinhn on 10/14/16. + *

Created by vinhn on 10/14/16. */ @Singleton public class NodeToolFlushMeasurement implements IMeasurement { @@ -45,4 +41,3 @@ public void incrementSuccess() { success.increment(); } } - diff --git a/priam/src/main/java/com/netflix/priam/merics/SecurityMetrics.java b/priam/src/main/java/com/netflix/priam/merics/SecurityMetrics.java new file mode 100644 index 000000000..7dac74222 --- /dev/null +++ b/priam/src/main/java/com/netflix/priam/merics/SecurityMetrics.java @@ -0,0 +1,21 @@ +package com.netflix.priam.merics; + +import com.google.inject.Inject; +import com.netflix.spectator.api.Gauge; +import com.netflix.spectator.api.Registry; +import javax.inject.Singleton; + +/** Metrics pertaining to network security. Currently just publishes a count of ingress rules. */ +@Singleton +public class SecurityMetrics { + private final Gauge ingressRules; + + @Inject + public SecurityMetrics(Registry registry) { + ingressRules = registry.gauge(Metrics.METRIC_PREFIX + "ingress.rules"); + } + + public void setIngressRules(int count) { + ingressRules.set(count); + } +} diff --git a/priam/src/main/java/com/netflix/priam/notification/AWSSnsNotificationService.java b/priam/src/main/java/com/netflix/priam/notification/AWSSnsNotificationService.java index dec091b14..61bcd91f3 100644 --- a/priam/src/main/java/com/netflix/priam/notification/AWSSnsNotificationService.java +++ b/priam/src/main/java/com/netflix/priam/notification/AWSSnsNotificationService.java @@ -23,74 +23,85 @@ import com.amazonaws.services.sns.model.PublishResult; import com.google.inject.Inject; import com.google.inject.Singleton; -import com.netflix.priam.config.IConfiguration; import com.netflix.priam.aws.IAMCredential; +import com.netflix.priam.config.IConfiguration; +import com.netflix.priam.identity.config.InstanceInfo; import com.netflix.priam.merics.BackupMetrics; import com.netflix.priam.utils.BoundedExponentialRetryCallable; +import java.util.Map; import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.Map; - /* * A single, persisted, connection to Amazon SNS. */ @Singleton public class AWSSnsNotificationService implements INotificationService { - private static final Logger logger = LoggerFactory.getLogger(AWSSnsNotificationService.class); + private static final Logger logger = LoggerFactory.getLogger(AWSSnsNotificationService.class); + + private final IConfiguration configuration; + private final AmazonSNS snsClient; + private final BackupMetrics backupMetrics; + + @Inject + public AWSSnsNotificationService( + IConfiguration config, + IAMCredential iamCredential, + BackupMetrics backupMetrics, + InstanceInfo instanceInfo) { + this.configuration = config; + this.backupMetrics = backupMetrics; + String ec2_region = instanceInfo.getRegion(); + snsClient = + AmazonSNSClient.builder() + .withCredentials(iamCredential.getAwsCredentialProvider()) + .withRegion(ec2_region) + .build(); + } + + @Override + public void notify( + final String msg, final Map messageAttributes) { + // e.g. 
arn:aws:sns:eu-west-1:1234:eu-west-1-cass-sample-backup + final String topic_arn = this.configuration.getBackupNotificationTopicArn(); + if (!configuration.enableBackupNotification() || StringUtils.isEmpty(topic_arn)) { + return; + } - private IConfiguration configuration; - private AmazonSNS snsClient; - private BackupMetrics backupMetrics; - - @Inject - public AWSSnsNotificationService(IConfiguration config, IAMCredential iamCredential - , BackupMetrics backupMetrics) { - this.configuration = config; - this.backupMetrics = backupMetrics; - String ec2_region = this.configuration.getDC(); - snsClient = AmazonSNSClient.builder() - .withCredentials(iamCredential.getAwsCredentialProvider()) - .withRegion(ec2_region).build(); - } - - @Override - public void notify(final String msg, final Map messageAttributes) { - final String topic_arn = this.configuration.getBackupNotificationTopicArn(); //e.g. arn:aws:sns:eu-west-1:1234:eu-west-1-cass-sample-backup - if (StringUtils.isEmpty(topic_arn)) { - return; - } - - PublishResult publishResult = null; - try { - publishResult = new BoundedExponentialRetryCallable() { - @Override - public PublishResult retriableCall() throws Exception { - PublishRequest publishRequest = new PublishRequest(topic_arn, msg).withMessageAttributes(messageAttributes); - PublishResult result = snsClient.publish(publishRequest); - return result; - } - }.call(); - - } catch (Exception e) { - logger.error(String.format("Exhausted retries. Publishing notification metric for failure and moving on. Failed msg to publish: {}", msg), e); - backupMetrics.incrementSnsNotificationFailure(); - return; - } + PublishResult publishResult; + try { + publishResult = + new BoundedExponentialRetryCallable() { + @Override + public PublishResult retriableCall() throws Exception { + PublishRequest publishRequest = + new PublishRequest(topic_arn, msg) + .withMessageAttributes(messageAttributes); + return snsClient.publish(publishRequest); + } + }.call(); - //If here, message was published. As a extra validation, ensure we have a msg id - String publishedMsgId = publishResult.getMessageId(); - if (publishedMsgId == null || publishedMsgId.isEmpty() ) { - backupMetrics.incrementSnsNotificationFailure(); - return; - } + } catch (Exception e) { + logger.error( + String.format( + "Exhausted retries. Publishing notification metric for failure and moving on. Failed msg to publish: %s", + msg), + e); + backupMetrics.incrementSnsNotificationFailure(); + return; + } - backupMetrics.incrementSnsNotificationSuccess(); - if (logger.isTraceEnabled()) { - logger.trace("Published msg: {} aws sns messageId - {}", msg, publishedMsgId); - } - } + // If here, message was published. 
As an extra validation, ensure we have a msg id + String publishedMsgId = publishResult.getMessageId(); + if (publishedMsgId == null || publishedMsgId.isEmpty()) { + backupMetrics.incrementSnsNotificationFailure(); + return; + } - -} \ No newline at end of file + backupMetrics.incrementSnsNotificationSuccess(); + if (logger.isTraceEnabled()) { + logger.trace("Published msg: {} aws sns messageId - {}", msg, publishedMsgId); + } + } +} diff --git a/priam/src/main/java/com/netflix/priam/notification/BackupEvent.java b/priam/src/main/java/com/netflix/priam/notification/BackupEvent.java index 4107f60e2..9f156858c 100644 --- a/priam/src/main/java/com/netflix/priam/notification/BackupEvent.java +++ b/priam/src/main/java/com/netflix/priam/notification/BackupEvent.java @@ -20,10 +20,9 @@ import com.netflix.priam.backup.AbstractBackupPath; /** - * POJO to encapsulate the details of backup of a file. - * Use this class to notify of the backup and its status. This will allow us to add more details about the backup event - * without modifying AbstractBackupPath. - * Created by aagrawal on 8/11/17. + * POJO to encapsulate the details of backup of a file. Use this class to notify of the backup and + * its status. This will allow us to add more details about the backup event without modifying + * AbstractBackupPath. Created by aagrawal on 8/11/17. */ public class BackupEvent { private AbstractBackupPath abstractBackupPath; diff --git a/priam/src/main/java/com/netflix/priam/notification/BackupNotificationMgr.java b/priam/src/main/java/com/netflix/priam/notification/BackupNotificationMgr.java index 4246c384a..2bef31e0c 100644 --- a/priam/src/main/java/com/netflix/priam/notification/BackupNotificationMgr.java +++ b/priam/src/main/java/com/netflix/priam/notification/BackupNotificationMgr.java @@ -1,36 +1,38 @@ /** * Copyright 2017 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and + * + *

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + *

http://www.apache.org/licenses/LICENSE-2.0 + * + *

 Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.priam.notification; import com.amazonaws.services.sns.model.MessageAttributeValue; import com.google.inject.Inject; -import com.netflix.priam.config.IConfiguration; import com.netflix.priam.backup.AbstractBackupPath; +import com.netflix.priam.backup.BackupRestoreException; +import com.netflix.priam.backup.BackupVerificationResult; +import com.netflix.priam.config.IBackupRestoreConfig; +import com.netflix.priam.config.IConfiguration; +import com.netflix.priam.identity.InstanceIdentity; +import com.netflix.priam.identity.config.InstanceInfo; +import java.util.*; +import org.apache.commons.lang3.StringUtils; import org.codehaus.jettison.json.JSONException; import org.codehaus.jettison.json.JSONObject; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.HashMap; -import java.util.Map; - /** * A means to notify interested party(ies) of an uploaded file, success or failure. * - * Created by vinhn on 10/30/16. + *

Created by vinhn on 10/30/16. */ public class BackupNotificationMgr implements EventObserver { @@ -39,42 +41,139 @@ public class BackupNotificationMgr implements EventObserver { private static final String STARTED = "started"; private static final Logger logger = LoggerFactory.getLogger(BackupNotificationMgr.class); private final IConfiguration config; - private INotificationService notificationService; + private final IBackupRestoreConfig backupRestoreConfig; + private final INotificationService notificationService; + private final InstanceInfo instanceInfo; + private final InstanceIdentity instanceIdentity; + private final Set notifiedBackupFileTypesSet; + private String notifiedBackupFileTypes; @Inject - public BackupNotificationMgr(IConfiguration config, INotificationService notificationService) { + public BackupNotificationMgr( + IConfiguration config, + IBackupRestoreConfig backupRestoreConfig, + INotificationService notificationService, + InstanceInfo instanceInfo, + InstanceIdentity instanceIdentity) { this.config = config; + this.backupRestoreConfig = backupRestoreConfig; this.notificationService = notificationService; + this.instanceInfo = instanceInfo; + this.instanceIdentity = instanceIdentity; + this.notifiedBackupFileTypesSet = new HashSet<>(); + this.notifiedBackupFileTypes = ""; } - private void notify(AbstractBackupPath abp, String uploadStatus) { + public void notify(BackupVerificationResult backupVerificationResult) { JSONObject jsonObject = new JSONObject(); try { jsonObject.put("s3bucketname", this.config.getBackupPrefix()); - jsonObject.put("s3clustername", abp.getClusterName()); - jsonObject.put("s3namespace", abp.getRemotePath()); - jsonObject.put("keyspace", abp.getKeyspace()); - jsonObject.put("cf", abp.getColumnFamily()); - jsonObject.put("region", abp.getRegion()); - jsonObject.put("rack", this.config.getRac()); - jsonObject.put("token", abp.getToken()); - jsonObject.put("filename", abp.getFileName()); - jsonObject.put("uncompressfilesize", abp.getSize()); - jsonObject.put("compressfilesize", abp.getCompressedFileSize()); - jsonObject.put("backuptype", abp.getType().name()); - jsonObject.put("uploadstatus", uploadStatus); - - //SNS Attributes for filtering messages. Cluster name and backup file type. - Map messageAttributes = new HashMap<>(); - messageAttributes.putIfAbsent("s3clustername", new MessageAttributeValue().withDataType("String").withStringValue(abp.getClusterName())); - messageAttributes.putIfAbsent("backuptype", new MessageAttributeValue().withDataType("String").withStringValue(abp.getType().name())); + jsonObject.put("s3clustername", config.getAppName()); + jsonObject.put("s3namespace", backupVerificationResult.remotePath); + jsonObject.put("region", instanceInfo.getRegion()); + jsonObject.put("rack", instanceInfo.getRac()); + jsonObject.put("token", instanceIdentity.getInstance().getToken()); + jsonObject.put( + "backuptype", AbstractBackupPath.BackupFileType.SNAPSHOT_VERIFIED.name()); + jsonObject.put("snapshotInstant", backupVerificationResult.snapshotInstant); + // SNS Attributes for filtering messages. Cluster name and backup file type. + Map messageAttributes = getMessageAttributes(jsonObject); this.notificationService.notify(jsonObject.toString(), messageAttributes); } catch (JSONException exception) { - logger.error("JSON exception during generation of notification for upload {}. Local file {}. Ignoring to continue with rest of backup. 
Msg: {}", uploadStatus, abp.getFileName(), exception.getLocalizedMessage()); + logger.error( + "JSON exception during generation of notification for snapshot verification: {}. Msg: {}", + backupVerificationResult, + exception.getLocalizedMessage()); } } + private Map getMessageAttributes(JSONObject message) + throws JSONException { + Map attributes = new HashMap<>(); + attributes.put("s3clustername", toStringAttribute(message.getString("s3clustername"))); + attributes.put("backuptype", toStringAttribute(message.getString("backuptype"))); + for (String attr : backupRestoreConfig.getBackupNotificationAdditionalMessageAttrs()) { + if (message.has(attr)) { + attributes.put(attr, toStringAttribute(String.valueOf(message.get(attr)))); + } + } + return attributes; + } + + private MessageAttributeValue toStringAttribute(String value) { + return new MessageAttributeValue().withDataType("String").withStringValue(value); + } + + private void notify(AbstractBackupPath abp, String uploadStatus) { + JSONObject jsonObject = new JSONObject(); + try { + Set updatedNotifiedBackupFileTypeSet = + getUpdatedNotifiedBackupFileTypesSet(this.notifiedBackupFileTypes); + if (updatedNotifiedBackupFileTypeSet.isEmpty() + || updatedNotifiedBackupFileTypeSet.contains(abp.getType())) { + jsonObject.put("s3bucketname", this.config.getBackupPrefix()); + jsonObject.put("s3clustername", abp.getClusterName()); + jsonObject.put("s3namespace", abp.getRemotePath()); + jsonObject.put("keyspace", abp.getKeyspace()); + jsonObject.put("cf", abp.getColumnFamily()); + jsonObject.put("region", abp.getRegion()); + jsonObject.put("rack", instanceInfo.getRac()); + jsonObject.put("token", abp.getToken()); + jsonObject.put("filename", abp.getFileName()); + jsonObject.put("uncompressfilesize", abp.getSize()); + jsonObject.put("compressfilesize", abp.getCompressedFileSize()); + jsonObject.put("backuptype", abp.getType().name()); + jsonObject.put("uploadstatus", uploadStatus); + jsonObject.put("compression", abp.getCompression().name()); + jsonObject.put("encryption", abp.getEncryption().name()); + jsonObject.put("isincremental", abp.isIncremental()); + + // SNS Attributes for filtering messages. Cluster name and backup file type. + Map messageAttributes = + getMessageAttributes(jsonObject); + + this.notificationService.notify(jsonObject.toString(), messageAttributes); + } else { + logger.debug( + "BackupFileType {} is not in the list of notified component types {}", + abp.getType().name(), + StringUtils.join(notifiedBackupFileTypesSet, ", ")); + } + } catch (JSONException exception) { + logger.error( + "JSON exception during generation of notification for upload {}. Local file {}. Ignoring to continue with rest of backup. 
Msg: {}", + uploadStatus, + abp.getFileName(), + exception.getLocalizedMessage()); + } + } + + private Set getUpdatedNotifiedBackupFileTypesSet( + String notifiedBackupFileTypes) { + String propertyValue = this.backupRestoreConfig.getBackupNotifyComponentIncludeList(); + if (!notifiedBackupFileTypes.equals(propertyValue)) { + logger.info( + String.format( + "Notified BackupFileTypes changed from %s to %s", + this.notifiedBackupFileTypes, propertyValue)); + this.notifiedBackupFileTypesSet.clear(); + this.notifiedBackupFileTypes = + this.backupRestoreConfig.getBackupNotifyComponentIncludeList(); + if (!StringUtils.isBlank(this.notifiedBackupFileTypes)) { + for (String s : this.notifiedBackupFileTypes.split(",")) { + try { + AbstractBackupPath.BackupFileType backupFileType = + AbstractBackupPath.BackupFileType.fromString(s.trim()); + notifiedBackupFileTypesSet.add(backupFileType); + } catch (BackupRestoreException ignored) { + } + } + } + } + return Collections.unmodifiableSet(this.notifiedBackupFileTypesSet); + } + @Override public void updateEventStart(BackupEvent event) { notify(event.getAbstractBackupPath(), STARTED); @@ -94,5 +193,4 @@ public void updateEventSuccess(BackupEvent event) { public void updateEventStop(BackupEvent event) { // Do nothing. } - } diff --git a/priam/src/main/java/com/netflix/priam/notification/EventGenerator.java b/priam/src/main/java/com/netflix/priam/notification/EventGenerator.java index 068f46503..99b28f02b 100644 --- a/priam/src/main/java/com/netflix/priam/notification/EventGenerator.java +++ b/priam/src/main/java/com/netflix/priam/notification/EventGenerator.java @@ -18,24 +18,24 @@ package com.netflix.priam.notification; /** - * Generic interface which an event generator class should implement so all the observers could be notified of the events. - * Any class interested in state change for the event can subscribe by registering themselves. - * Created by aagrawal on 8/11/17. + * Generic interface which an event generator class should implement so all the observers could be + * notified of the events. Any class interested in state change for the event can subscribe by + * registering themselves. Created by aagrawal on 8/11/17. */ public interface EventGenerator { /** * Subscribes {@code observer} to receive generated events. * - * @param observer {@link EventObserver} interested in receiving updates from - * this event generator. May not be null. + * @param observer {@link EventObserver} interested in receiving updates from this event + * generator. May not be null. */ void addObserver(EventObserver observer); /** * Removes {@code observer} from receiving any further events from this generator. * - * @param observer {@link EventObserver} that is to stop receiving updates - * from this event generator. May not be null. + * @param observer {@link EventObserver} that is to stop receiving updates from this event + * generator. May not be null. */ void removeObserver(EventObserver observer); diff --git a/priam/src/main/java/com/netflix/priam/notification/EventObserver.java b/priam/src/main/java/com/netflix/priam/notification/EventObserver.java index 21cb02bd1..3d9326828 100644 --- a/priam/src/main/java/com/netflix/priam/notification/EventObserver.java +++ b/priam/src/main/java/com/netflix/priam/notification/EventObserver.java @@ -18,8 +18,8 @@ package com.netflix.priam.notification; /** - * Subscriber who wishes to receive event notifications from the {@link EventGenerator} - * Created by aagrawal on 8/11/17. 
+ * Subscriber who wishes to receive event notifications from the {@link EventGenerator} Created by + * aagrawal on 8/11/17. */ public interface EventObserver<T> { /** diff --git a/priam/src/main/java/com/netflix/priam/notification/INotificationService.java b/priam/src/main/java/com/netflix/priam/notification/INotificationService.java index 62725eba5..7fa036f6e 100644 --- a/priam/src/main/java/com/netflix/priam/notification/INotificationService.java +++ b/priam/src/main/java/com/netflix/priam/notification/INotificationService.java @@ -1,36 +1,30 @@ /** * Copyright 2017 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and + * + *

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + *

http://www.apache.org/licenses/LICENSE-2.0 + * + *

Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.priam.notification; import com.amazonaws.services.sns.model.MessageAttributeValue; import com.google.inject.ImplementedBy; - import java.util.Map; -/** - * Service to notify of a message. - * Created by vinhn on 11/3/16. - */ +/** Service to notify of a message. Created by vinhn on 11/3/16. */ @ImplementedBy(AWSSnsNotificationService.class) interface INotificationService { /** * Notify the message. + * * @param msg Message that needs to be notified * @param messageAttributes Message attributes to be used while sending the message. */ void notify(String msg, Map messageAttributes); - } diff --git a/priam/src/main/java/com/netflix/priam/resources/BackupServlet.java b/priam/src/main/java/com/netflix/priam/resources/BackupServlet.java index 88754fc72..31c9efdcc 100644 --- a/priam/src/main/java/com/netflix/priam/resources/BackupServlet.java +++ b/priam/src/main/java/com/netflix/priam/resources/BackupServlet.java @@ -16,43 +16,29 @@ */ package com.netflix.priam.resources; -import com.google.common.collect.Lists; import com.google.inject.Inject; -import com.google.inject.Provider; import com.google.inject.name.Named; -import com.netflix.priam.defaultimpl.ICassandraProcess; -import com.netflix.priam.config.IConfiguration; -import com.netflix.priam.PriamServer; import com.netflix.priam.backup.*; import com.netflix.priam.backup.AbstractBackupPath.BackupFileType; -import com.netflix.priam.identity.IPriamInstanceFactory; -import com.netflix.priam.identity.PriamInstance; -import com.netflix.priam.restore.Restore; -import com.netflix.priam.tuner.ICassandraTuner; +import com.netflix.priam.backup.BackupVersion; +import com.netflix.priam.config.IBackupRestoreConfig; +import com.netflix.priam.config.IConfiguration; import com.netflix.priam.scheduler.PriamScheduler; -import com.netflix.priam.utils.*; -import org.apache.commons.lang3.StringUtils; -import org.apache.commons.lang3.exception.ExceptionUtils; +import com.netflix.priam.utils.DateUtil; +import com.netflix.priam.utils.DateUtil.DateRange; +import java.time.Instant; +import java.time.temporal.ChronoUnit; +import java.util.*; +import java.util.stream.Collectors; +import javax.ws.rs.*; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.Response; import org.codehaus.jettison.json.JSONArray; import org.codehaus.jettison.json.JSONException; import org.codehaus.jettison.json.JSONObject; -import org.joda.time.DateTime; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import javax.ws.rs.*; -import javax.ws.rs.core.MediaType; -import javax.ws.rs.core.Response; -import java.io.File; -import java.io.IOException; -import java.math.BigInteger; -import java.util.ArrayList; -import java.util.Date; -import java.util.Iterator; -import java.util.List; -import java.util.concurrent.*; -import java.util.stream.Collectors; - @Path("/v1/backup") @Produces(MediaType.APPLICATION_JSON) public class BackupServlet { @@ -61,55 +47,32 @@ public class BackupServlet { private static final String REST_SUCCESS = "[\"ok\"]"; private static final String REST_HEADER_RANGE = "daterange"; private static final String REST_HEADER_FILTER = "filter"; - private static final String REST_HEADER_TOKEN = "token"; - private static final String 
REST_HEADER_REGION = "region"; - private static final String REST_KEYSPACES = "keyspaces"; - private static final String REST_RESTORE_PREFIX = "restoreprefix"; - private static final String FMT = "yyyyMMddHHmm"; - private static final String REST_LOCR_ROWKEY = "verifyrowkey"; - private static final String REST_LOCR_KEYSPACE = "verifyks"; - private static final String REST_LOCR_COLUMNFAMILY = "verifycf"; - private static final String REST_LOCR_FILEEXTENSION = "verifyfileextension"; - private static final String SSTABLE2JSON_DIR_LOCATION = "/tmp/priam_sstables"; - private static final String SSTABLE2JSON_COMMAND_FROM_CASSHOME = "/bin/sstable2json"; - - private PriamServer priamServer; - private IConfiguration config; - private IBackupFileSystem backupFs; - private IBackupFileSystem bkpStatusFs; - private Restore restoreObj; - private Provider pathProvider; - private ICassandraTuner tuner; - private SnapshotBackup snapshotBackup; - private IPriamInstanceFactory factory; - private final ITokenManager tokenManager; - private final ICassandraProcess cassProcess; - private BackupVerification backupVerification; - @Inject - private PriamScheduler scheduler; - @Inject - private MetaData metaData; - - private IBackupStatusMgr completedBkups; + private final IConfiguration config; + private final IBackupRestoreConfig backupRestoreConfig; + private final IBackupFileSystem backupFs; + private final SnapshotBackup snapshotBackup; + private final BackupVerification backupVerification; + @Inject private PriamScheduler scheduler; + private final IBackupStatusMgr completedBkups; + private final BackupService backupService; + @Inject private MetaData metaData; @Inject - public BackupServlet(PriamServer priamServer, IConfiguration config, @Named("backup")IBackupFileSystem backupFs,@Named("backup_status")IBackupFileSystem bkpStatusFs, Restore restoreObj, Provider pathProvider, ICassandraTuner tuner, - SnapshotBackup snapshotBackup, IPriamInstanceFactory factory, ITokenManager tokenManager, ICassandraProcess cassProcess - ,IBackupStatusMgr completedBkups, BackupVerification backupVerification) - { - this.priamServer = priamServer; + public BackupServlet( + IConfiguration config, + IBackupRestoreConfig backupRestoreConfig, + @Named("backup") IBackupFileSystem backupFs, + SnapshotBackup snapshotBackup, + IBackupStatusMgr completedBkups, + BackupVerification backupVerification, + BackupService backupService) { this.config = config; + this.backupRestoreConfig = backupRestoreConfig; this.backupFs = backupFs; - this.bkpStatusFs = bkpStatusFs; - this.restoreObj = restoreObj; - this.pathProvider = pathProvider; - this.tuner = tuner; this.snapshotBackup = snapshotBackup; - this.factory = factory; - this.tokenManager = tokenManager; - this.cassProcess = cassProcess; this.completedBkups = completedBkups; this.backupVerification = backupVerification; + this.backupService = backupService; } @GET @@ -122,54 +85,57 @@ public Response backup() throws Exception { @GET @Path("/incremental_backup") public Response backupIncrementals() throws Exception { - scheduler.addTask("IncrementalBackup", IncrementalBackup.class, IncrementalBackup.getTimer()); + scheduler.addTask( + "IncrementalBackup", + IncrementalBackup.class, + IncrementalBackup.getTimer(config, backupRestoreConfig)); return Response.ok(REST_SUCCESS, MediaType.APPLICATION_JSON).build(); } + @GET + @Path("/updateService") + public Response updateService() throws Exception { + backupService.onChangeUpdateService(); + return Response.ok(REST_SUCCESS, 
MediaType.APPLICATION_JSON).build(); + } @GET @Path("/list") /* * Fetch the list of files for the requested date range. - * + * * @param date range - * @param filter. The type of data files fetched. E.g. META will only fetch the dailsy snapshot meta data file (meta.json). + * @param filter. The type of data files fetched. E.g. META will only fetch the daily snapshot meta data file (meta.json). * @return the list of files in json format as part of the Http response body. */ - public Response list(@QueryParam(REST_HEADER_RANGE) String daterange, @QueryParam(REST_HEADER_FILTER) @DefaultValue("") String filter) throws Exception { - Date startTime; - Date endTime; - - if (StringUtils.isBlank(daterange) || daterange.equalsIgnoreCase("default")) { - startTime = new DateTime().minusDays(1).toDate(); - endTime = new DateTime().toDate(); - } else { - String[] restore = daterange.split(","); - AbstractBackupPath path = pathProvider.get(); - startTime = path.parseDate(restore[0]); - endTime = path.parseDate(restore[1]); - } - - logger.info("Parameters: {backupPrefix: [{}], daterange: [{}], filter: [{}]}", - config.getBackupPrefix(), daterange, filter); - - Iterator it = bkpStatusFs.list(config.getBackupPrefix(), startTime, endTime); + public Response list( + @QueryParam(REST_HEADER_RANGE) String daterange, + @QueryParam(REST_HEADER_FILTER) @DefaultValue("") String filter) + throws Exception { + + logger.info( + "Parameters: {backupPrefix: [{}], daterange: [{}], filter: [{}]}", + config.getBackupPrefix(), + daterange, + filter); + + DateUtil.DateRange dateRange = new DateUtil.DateRange(daterange); + + Iterator it = + backupFs.list( + config.getBackupPrefix(), + Date.from(dateRange.getStartTime()), + Date.from(dateRange.getEndTime())); JSONObject object = new JSONObject(); object = constructJsonResponse(object, it, filter); return Response.ok(object.toString(2), MediaType.APPLICATION_JSON).build(); } - @GET @Path("/status") @Produces(MediaType.APPLICATION_JSON) public Response status() throws Exception { - int restoreTCount = restoreObj.getActiveCount(); //Active threads performing the restore - logger.debug("Thread counts for restore is: {}", restoreTCount); - int backupTCount = backupFs.getActivecount(); - logger.debug("Thread counts for snapshot backup is: {}", backupTCount); JSONObject object = new JSONObject(); - object.put("ThreadCount", new Integer(backupTCount)); //Number of active threads performing the snapshot backups object.put("SnapshotStatus", snapshotBackup.state().toString()); return Response.ok(object.toString(), MediaType.APPLICATION_JSON).build(); } @@ -184,41 +150,27 @@ public Response status() throws Exception { @Path("/status/{date}") @Produces(MediaType.APPLICATION_JSON) public Response statusByDate(@PathParam("date") String date) throws Exception { - JSONObject object = new JSONObject(); - List metadataLinkedList = this.completedBkups.locate(date); + Instant startTime = DateUtil.parseInstant(date); + Optional backupMetadataOptional = + this.completedBkups + .getLatestBackupMetadata( + BackupVersion.SNAPSHOT_BACKUP, + new DateRange( + startTime.truncatedTo(ChronoUnit.DAYS), + startTime + .plus(1, ChronoUnit.DAYS) + .truncatedTo(ChronoUnit.DAYS))) + .stream() + .findFirst(); - if (metadataLinkedList != null && !metadataLinkedList.isEmpty()) { - // backup exist base on requested date, lets fetch more of its metadata - BackupMetadata bkupMetadata = metadataLinkedList.get(0); - object.put("Snapshotstatus", bkupMetadata.getStatus().equals(Status.FINISHED)); - String token = 
bkupMetadata.getToken(); - if (token != null && !token.isEmpty()) { - object.put("token", bkupMetadata.getToken()); - } else { - object.put("token", "not available"); - } - if (bkupMetadata.getStart() != null) { - object.put("starttime", DateUtil.formatyyyyMMddHHmm(bkupMetadata.getStart())); - } else { - object.put("starttime", "not available"); - } - - if (bkupMetadata.getCompleted() != null) { - object.put("completetime", DateUtil.formatyyyyMMddHHmm(bkupMetadata.getCompleted())); - } else { - object.put("completetime", "not_available"); - } - - } else { //Backup do not exist for that date. + JSONObject object = new JSONObject(); + if (!backupMetadataOptional.isPresent()) { object.put("Snapshotstatus", false); - String token = SystemUtils.getDataFromUrl("http://localhost:8080/Priam/REST/v1/cassconfig/get_token"); - if (token != null && !token.isEmpty()) { - object.put("token", token); - } else { - object.put("token", "not available"); - } - } + } else { + object.put("Snapshotstatus", true); + object.put("Details", new JSONObject(backupMetadataOptional.get().toString())); + } return Response.ok(object.toString(), MediaType.APPLICATION_JSON).build(); } @@ -234,36 +186,25 @@ public Response statusByDate(@PathParam("date") String date) throws Exception { public Response snapshotsByDate(@PathParam("date") String date) throws Exception { List metadata = this.completedBkups.locate(date); JSONObject object = new JSONObject(); - List snapshots = new ArrayList(); + List snapshots = new ArrayList<>(); if (metadata != null && !metadata.isEmpty()) - snapshots.addAll(metadata.stream().map(backupMetadata -> DateUtil.formatyyyyMMddHHmm(backupMetadata.getStart())).collect(Collectors.toList())); + snapshots.addAll( + metadata.stream() + .filter( + backupMetadata -> + backupMetadata + .getBackupVersion() + .equals(BackupVersion.SNAPSHOT_BACKUP)) + .map( + backupMetadata -> + DateUtil.formatyyyyMMddHHmm(backupMetadata.getStart())) + .collect(Collectors.toList())); object.put("Snapshots", snapshots); return Response.ok(object.toString(), MediaType.APPLICATION_JSON).build(); } - private List getLatestBackupMetadata(Date startTime, Date endTime) { - List backupMetadata = this.completedBkups.locate(endTime); - if (backupMetadata != null && !backupMetadata.isEmpty()) - return backupMetadata; - if (DateUtil.formatyyyyMMdd(startTime).equals(DateUtil.formatyyyyMMdd(endTime))) { - logger.info("Start & end date are same. No SNAPSHOT found for date: {}", DateUtil.formatyyyyMMdd(endTime)); - return null; - } else { - Date previousDay = new Date(endTime.getTime()); - do { - //We need to find the latest backupmetadata in this date range. - previousDay = new DateTime(previousDay.getTime()).minusDays(1).toDate(); - logger.info("Will try to find snapshot for previous day: {}", DateUtil.formatyyyyMMdd(previousDay)); - backupMetadata = completedBkups.locate(previousDay); - if (backupMetadata != null && !backupMetadata.isEmpty()) - return backupMetadata; - } while (!DateUtil.formatyyyyMMdd(startTime).equals(DateUtil.formatyyyyMMdd(previousDay))); - } - return null; - } - /* * Determines the validity of the backup by i) Downloading meta.json file ii) Listing of the backup directory * iii) Find the missing or extra files in backup location. 
@@ -273,203 +214,34 @@ private List<BackupMetadata> getLatestBackupMetadata(Date startTime, Date endTim @GET @Path("/validate/snapshot/{daterange}") @Produces(MediaType.APPLICATION_JSON) - public Response validateSnapshotByDate(@PathParam("daterange") String daterange) throws Exception { - - Date startTime; - Date endTime; - - if (StringUtils.isBlank(daterange) || daterange.equalsIgnoreCase("default")) { - startTime = new DateTime().minusDays(1).toDate(); - endTime = new DateTime().toDate(); - } else { - String[] dates = daterange.split(","); - startTime = DateUtil.getDate(dates[0]); - endTime = DateUtil.getDate(dates[1]); + public Response validateSnapshotByDate( + @PathParam("daterange") String daterange, + @DefaultValue("false") @QueryParam("force") boolean force) + throws Exception { + DateUtil.DateRange dateRange = new DateUtil.DateRange(daterange); + Optional<BackupVerificationResult> result = + backupVerification.verifyBackup(BackupVersion.SNAPSHOT_BACKUP, force, dateRange); + if (!result.isPresent()) { + return Response.noContent() + .entity("No valid meta found for provided time range") + .build(); } - JSONObject jsonReply = new JSONObject(); - jsonReply.put("inputStartDate", DateUtil.formatyyyyMMddHHmm(startTime)); - jsonReply.put("inputEndDate", DateUtil.formatyyyyMMddHHmm(endTime)); - logger.info("Will try to validate latest backup during startTime: {}, and endTime: {}", DateUtil.formatyyyyMMddHHmm(startTime), DateUtil.formatyyyyMMddHHmm(endTime)); - - List<BackupMetadata> metadata = getLatestBackupMetadata(startTime, endTime); - BackupVerificationResult result = backupVerification.verifyBackup(metadata, startTime); - jsonReply.put("snapshotAvailable", result.snapshotAvailable); - jsonReply.put("valid", result.valid); - jsonReply.put("backupFileListAvailable", result.backupFileListAvail); - jsonReply.put("metaFileFound", result.metaFileFound); - jsonReply.put("selectedDate", result.selectedDate); - jsonReply.put("snapshotTime", result.snapshotTime); - jsonReply.put("filesInMetaOnly", result.filesInMetaOnly); - jsonReply.put("filesInS3Only", result.filesInS3Only); - jsonReply.put("filesMatched", result.filesMatched); - return Response.ok(jsonReply.toString()).build(); - } - - /** - *
<p>
- * Life_Of_C*Row : With this REST call, mutations/existence of a rowkey can be found. - * It uses SSTable2Json utility which will convert SSTables on disk to JSON format and - * Search for the desired rowkey. - *
<p>
- * Steps include: - * 1. Restoring data for given data range and other params - * 2. Searching provided rowkey in SSTables and writing search result to JSON - * 3. Delete all the files under Keyspace Directory. - * Deletion is done for efficient space usage, so that same node can be reused for - * subsequent runs. - *
<p>
- *
<p>
- * Similar to Restore call and few additional params. - *
<p>
- * daterange : Can not be Null or Default. Comma separated Start and End date eg. 201311250000,201311260000 - * rowkey : rowkey to search (In Hex format) - * ks : keyspace of mentioned rowkey - * cf : column family of mentioned rowkey - * fileExtension : Part of SSTable Data file names - * eg. if file name = KS1-CF1-hf-100-Data.db - * then fileExtension = KS1-CF1-hf - * - * @return Creates JSON file based on the passed date at hardcoded dir location : /tmp/priam_sstables - * If rowkey is not found in the SSTable, JSON file will be empty. - */ - @GET - @Path("/life_of_crow") - @Produces(MediaType.APPLICATION_JSON) - public Response restore_verify_key( - @QueryParam(REST_HEADER_RANGE) String daterange, - @QueryParam(REST_HEADER_REGION) String region, - @QueryParam(REST_HEADER_TOKEN) String token, - @QueryParam(REST_KEYSPACES) String keyspaces, - @QueryParam(REST_RESTORE_PREFIX) String restorePrefix, - @QueryParam(REST_LOCR_ROWKEY) String rowkey, - @QueryParam(REST_LOCR_KEYSPACE) String ks, - @QueryParam(REST_LOCR_COLUMNFAMILY) String cf, - @QueryParam(REST_LOCR_FILEEXTENSION) String fileExtension) throws Exception { - - Date startTime; - Date endTime; - //Creating Dir for Json storage - SystemUtils.createDirs(SSTABLE2JSON_DIR_LOCATION); - String JSON_FILE_PATH = ""; - - try { - - if (StringUtils.isBlank(daterange) - || daterange.equalsIgnoreCase("default")) { - return Response.ok("\n[\"daterange can't be blank or default.eg.201311250000,201311260000\"]\n", MediaType.APPLICATION_JSON) - .build(); - } - - String[] restore = daterange.split(","); - AbstractBackupPath path = pathProvider.get(); - startTime = path.parseDate(restore[0]); - endTime = path.parseDate(restore[1]); - - String origRestorePrefix = config.getRestorePrefix(); - if (StringUtils.isNotBlank(restorePrefix)) { - config.setRestorePrefix(restorePrefix); - } - - - restore(token, region, startTime, endTime, keyspaces); - - // Since this call is probably never called in parallel, config is - // multi-thread safe to be edited - config.setRestorePrefix(origRestorePrefix); - - while (!CassandraMonitor.hasCassadraStarted()) - Thread.sleep(1000L); - - // initialize json file name - JSON_FILE_PATH = daterange.split(",")[0].substring(0, 8) + ".json"; - - //Convert SSTable2Json and search for given rowkey - checkSSTablesForKey(rowkey, ks, cf, fileExtension, JSON_FILE_PATH); - - } catch (Exception e) { - logger.info(ExceptionUtils.getStackTrace(e)); - } finally { - removeAllDataFiles(ks); - } - - return Response.ok(REST_SUCCESS, MediaType.APPLICATION_JSON) - .build(); - } - - /** - * Restore with the specified start and end time. 
- * - * @param token Overrides the current token with this one, if specified - * @param region Override the region for searching backup - * @param startTime Start time - * @param endTime End time upto which the restore should fetch data - * @param keyspaces Comma seperated list of keyspaces to restore - * @throws Exception if restore is not successful - */ - private void restore(String token, String region, Date startTime, Date endTime, String keyspaces) throws Exception { - String origRegion = config.getDC(); - String origToken = priamServer.getId().getInstance().getToken(); - if (StringUtils.isNotBlank(token)) - priamServer.getId().getInstance().setToken(token); - - if (config.isRestoreClosestToken()) - priamServer.getId().getInstance().setToken(closestToken(priamServer.getId().getInstance().getToken(), config.getDC())); - - if (StringUtils.isNotBlank(region)) { - config.setDC(region); - logger.info("Restoring from region {}", region); - priamServer.getId().getInstance().setToken(closestToken(priamServer.getId().getInstance().getToken(), region)); - logger.info("Restore will use token {}", priamServer.getId().getInstance().getToken()); - } - - setRestoreKeyspaces(keyspaces); - - try { - restoreObj.restore(startTime, endTime); - } finally { - config.setDC(origRegion); - priamServer.getId().getInstance().setToken(origToken); - } - tuner.updateAutoBootstrap(config.getYamlLocation(), false); - cassProcess.start(true); - } - - /** - * Find closest token in the specified region - */ - private String closestToken(String token, String region) { - List<PriamInstance> plist = factory.getAllIds(config.getAppName()); - List<BigInteger> tokenList = Lists.newArrayList(); - for (PriamInstance ins : plist) { - if (ins.getDC().equalsIgnoreCase(region)) - tokenList.add(new BigInteger(ins.getToken())); - } - return tokenManager.findClosestToken(new BigInteger(token), tokenList).toString(); - } - - /* - * TODO: decouple the servlet, config, and restorer. this should not rely on a side - * effect of a list mutation on the config object (treating it as global var). - */ - private void setRestoreKeyspaces(String keyspaces) { - if (StringUtils.isNotBlank(keyspaces)) { - List<String> newKeyspaces = Lists.newArrayList(keyspaces.split(",")); - config.setRestoreKeySpaces(newKeyspaces); - } + return Response.ok(result.get().toString()).build(); } /* - * A list of files for requested filter. Currently, the only supported filter is META, all others will be ignore. + * A list of files for requested filter. Currently, the only supported filter is META, all others will be ignored. * For filter of META, ONLY the daily snapshot meta file (meta.json) are accounted for, not the incremental meta file. * In addition, we do ONLY list the name of the meta data file, not the list of data files within it. - * + * * @param handle to the json response * @param a list of all files (data (*.db), and meta data file (*.json)) from S3 for requested dates. * @param backup meta data file filter. Currently, the only supported filter is META, all others will be ignore. * @return a list of files in Json format. */ - private JSONObject constructJsonResponse(JSONObject object, Iterator<AbstractBackupPath> it, String filter) throws Exception { + private JSONObject constructJsonResponse( + JSONObject object, Iterator<AbstractBackupPath> it, String filter) throws Exception { int fileCnt = 0; filter = filter.contains("?") ? 
filter.substring(0, filter.indexOf("?")) : filter; @@ -477,32 +249,31 @@ private JSONObject constructJsonResponse(JSONObject object, Iterator callable = new Callable() { - @Override - public Integer call() throws Exception { - return p.waitFor(); - } - }; - - ExecutorService exeService = Executors.newSingleThreadExecutor(); - try { - Future future = exeService.submit(callable); - int returnVal = future.get(TIMEOUT_PERIOD, TimeUnit.MINUTES); - if (returnVal == 0) - logger.info("Finished SSTable2Json conversion and search."); - else - logger.error("Error occurred during SSTable2Json conversion and search."); - } catch (TimeoutException e) { - logger.error(ExceptionUtils.getStackTrace(e)); - throw e; - } finally { - p.destroy(); - exeService.shutdown(); - } - - } catch (IOException e) { - logger.error(ExceptionUtils.getStackTrace(e)); - } - } - - public String formulateCommandToRun(String rowkey, String keyspace, String cf, String fileExtension, String jsonFilePath) { - StringBuffer sbuff = new StringBuffer(); - - sbuff.append("for i in $(ls " + config.getDataFileLocation() + File.separator + keyspace + File.separator + cf + File.separator + fileExtension + "-*-Data.db); do " + config.getCassHome() + SSTABLE2JSON_COMMAND_FROM_CASSHOME + " $i -k "); - sbuff.append(rowkey); - sbuff.append(" | grep "); - sbuff.append(rowkey); - sbuff.append(" >> "); - sbuff.append(SSTABLE2JSON_DIR_LOCATION + File.separator + jsonFilePath); - sbuff.append(" ; done"); - - logger.info("SSTable2JSON location <" + SSTABLE2JSON_DIR_LOCATION + "{}{}>", File.separator, jsonFilePath); - logger.info("Running Command = {}", sbuff); - return sbuff.toString(); - } - - public void removeAllDataFiles(String ks) throws Exception { - String cleanupDirPath = config.getDataFileLocation() + File.separator + ks; - logger.info("Starting to clean all the files inside <{}>", cleanupDirPath); - SystemUtils.cleanupDir(cleanupDirPath, null); - logger.info("*** Done cleaning all the files inside <{}>", cleanupDirPath); - } - -} \ No newline at end of file +} diff --git a/priam/src/main/java/com/netflix/priam/resources/BackupServletV2.java b/priam/src/main/java/com/netflix/priam/resources/BackupServletV2.java new file mode 100644 index 000000000..681306468 --- /dev/null +++ b/priam/src/main/java/com/netflix/priam/resources/BackupServletV2.java @@ -0,0 +1,166 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.priam.resources; + +import com.google.inject.Inject; +import com.google.inject.Provider; +import com.netflix.priam.backup.*; +import com.netflix.priam.backupv2.BackupTTLTask; +import com.netflix.priam.backupv2.BackupV2Service; +import com.netflix.priam.backupv2.IMetaProxy; +import com.netflix.priam.backupv2.SnapshotMetaTask; +import com.netflix.priam.config.IConfiguration; +import com.netflix.priam.utils.DateUtil; +import com.netflix.priam.utils.DateUtil.DateRange; +import com.netflix.priam.utils.GsonJsonSerializer; +import java.time.Instant; +import java.time.temporal.ChronoUnit; +import java.util.List; +import java.util.Optional; +import java.util.stream.Collectors; +import javax.inject.Named; +import javax.ws.rs.DefaultValue; +import javax.ws.rs.GET; +import javax.ws.rs.Path; +import javax.ws.rs.PathParam; +import javax.ws.rs.Produces; +import javax.ws.rs.QueryParam; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.Response; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** Created by aagrawal on 1/16/19. */ +@Path("/v2/backup") +@Produces(MediaType.APPLICATION_JSON) +public class BackupServletV2 { + private static final Logger logger = LoggerFactory.getLogger(BackupServletV2.class); + private final BackupVerification backupVerification; + private final IBackupStatusMgr backupStatusMgr; + private final SnapshotMetaTask snapshotMetaService; + private final BackupTTLTask backupTTLService; + private final IBackupFileSystem fs; + private final IMetaProxy metaProxy; + private final Provider pathProvider; + private final BackupV2Service backupService; + private static final String REST_SUCCESS = "[\"ok\"]"; + + @Inject + public BackupServletV2( + IBackupStatusMgr backupStatusMgr, + BackupVerification backupVerification, + SnapshotMetaTask snapshotMetaService, + BackupTTLTask backupTTLService, + IConfiguration configuration, + IFileSystemContext backupFileSystemCtx, + @Named("v2") IMetaProxy metaV2Proxy, + Provider pathProvider, + BackupV2Service backupService) { + this.backupStatusMgr = backupStatusMgr; + this.backupVerification = backupVerification; + this.snapshotMetaService = snapshotMetaService; + this.backupTTLService = backupTTLService; + this.fs = backupFileSystemCtx.getFileStrategy(configuration); + this.metaProxy = metaV2Proxy; + this.pathProvider = pathProvider; + this.backupService = backupService; + } + + @GET + @Path("/do_snapshot") + public Response backup() throws Exception { + snapshotMetaService.execute(); + return Response.ok(REST_SUCCESS, MediaType.APPLICATION_JSON).build(); + } + + @GET + @Path("/ttl") + public Response ttl() throws Exception { + backupTTLService.execute(); + return Response.ok(REST_SUCCESS, MediaType.APPLICATION_JSON).build(); + } + + @GET + @Path("/clearCache") + public Response clearCache() throws Exception { + fs.clearCache(); + return Response.ok(REST_SUCCESS, MediaType.APPLICATION_JSON).build(); + } + + @GET + @Path("/updateService") + public Response updateService() throws Exception { + backupService.onChangeUpdateService(); + return Response.ok(REST_SUCCESS, MediaType.APPLICATION_JSON).build(); + } + + @GET + @Path("/info/{date}") + public Response info(@PathParam("date") String date) { + Instant instant = DateUtil.parseInstant(date); + List metadataList = + backupStatusMgr.getLatestBackupMetadata( + BackupVersion.SNAPSHOT_META_SERVICE, + new DateRange( + instant, + instant.plus(1, ChronoUnit.DAYS).truncatedTo(ChronoUnit.DAYS))); + return 
Response.ok(GsonJsonSerializer.getGson().toJson(metadataList)).build(); + } + + @GET + @Path("/validate/{daterange}") + public Response validateV2SnapshotByDate( + @PathParam("daterange") String daterange, + @DefaultValue("false") @QueryParam("force") boolean force) + throws Exception { + DateUtil.DateRange dateRange = new DateUtil.DateRange(daterange); + Optional<BackupVerificationResult> result = + backupVerification.verifyBackup( + BackupVersion.SNAPSHOT_META_SERVICE, force, dateRange); + if (!result.isPresent()) { + return Response.noContent() + .entity("No valid meta found for provided time range") + .build(); + } + + return Response.ok(result.get().toString()).build(); + } + + @GET + @Path("/list/{daterange}") + public Response list(@PathParam("daterange") String daterange) throws Exception { + DateUtil.DateRange dateRange = new DateUtil.DateRange(daterange); + // Find latest valid meta file. + Optional<AbstractBackupPath> latestValidMetaFile = + BackupRestoreUtil.getLatestValidMetaPath(metaProxy, dateRange); + if (!latestValidMetaFile.isPresent()) { + return Response.ok("No valid meta found!").build(); + } + List<AbstractBackupPath> allFiles = + BackupRestoreUtil.getAllFiles( + latestValidMetaFile.get(), dateRange, metaProxy, pathProvider); + + return Response.ok( + GsonJsonSerializer.getGson() + .toJson( + allFiles.stream() + .map(AbstractBackupPath::getRemotePath) + .collect(Collectors.toList()))) + .build(); + } +} diff --git a/priam/src/main/java/com/netflix/priam/resources/CassandraAdmin.java b/priam/src/main/java/com/netflix/priam/resources/CassandraAdmin.java index 4b7dc0b99..54208791c 100644 --- a/priam/src/main/java/com/netflix/priam/resources/CassandraAdmin.java +++ b/priam/src/main/java/com/netflix/priam/resources/CassandraAdmin.java @@ -18,14 +18,23 @@ import com.google.common.collect.Lists; import com.google.inject.Inject; -import com.netflix.priam.defaultimpl.ICassandraProcess; -import com.netflix.priam.config.IConfiguration; import com.netflix.priam.cluster.management.Compaction; import com.netflix.priam.cluster.management.Flush; import com.netflix.priam.compress.SnappyCompression; -import com.netflix.priam.utils.JMXConnectionException; -import com.netflix.priam.utils.JMXNodeTool; -import org.apache.cassandra.exceptions.ConfigurationException; +import com.netflix.priam.config.IConfiguration; +import com.netflix.priam.connection.CassandraOperations; +import com.netflix.priam.connection.JMXConnectionException; +import com.netflix.priam.connection.JMXNodeTool; +import com.netflix.priam.defaultimpl.ICassandraProcess; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ExecutionException; +import javax.ws.rs.*; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.Response; import org.apache.commons.lang3.StringUtils; import org.codehaus.jettison.json.JSONArray; import org.codehaus.jettison.json.JSONException; @@ -33,23 +42,11 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import javax.ws.rs.*; -import javax.ws.rs.core.MediaType; -import javax.ws.rs.core.Response; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.IOException; -import java.util.concurrent.ExecutionException; - -/** - * Do general operations. Start/Stop and some JMX node tool commands - */ -@SuppressWarnings("deprecation") +/** Do general operations. 
Start/Stop and some JMX node tool commands */ @Path("/v1/cassadmin") @Produces(MediaType.APPLICATION_JSON) public class CassandraAdmin { private static final String REST_HEADER_KEYSPACES = "keyspaces"; - private static final String REST_HEADER_CFS = "cfnames"; private static final String REST_HEADER_TOKEN = "token"; private static final String REST_SUCCESS = "[\"ok\"]"; private static final Logger logger = LoggerFactory.getLogger(CassandraAdmin.class); @@ -57,32 +54,41 @@ public class CassandraAdmin { private final ICassandraProcess cassProcess; private final Flush flush; private final Compaction compaction; + private final CassandraOperations cassandraOperations; @Inject - public CassandraAdmin(IConfiguration config, ICassandraProcess cassProcess, Flush flush, Compaction compaction) { + public CassandraAdmin( + IConfiguration config, + ICassandraProcess cassProcess, + Flush flush, + Compaction compaction, + CassandraOperations cassandraOperations) { this.config = config; this.cassProcess = cassProcess; this.flush = flush; this.compaction = compaction; + this.cassandraOperations = cassandraOperations; } @GET @Path("/start") - public Response cassStart() throws IOException, InterruptedException, JSONException { + public Response cassStart() throws IOException { cassProcess.start(true); return Response.ok(REST_SUCCESS, MediaType.APPLICATION_JSON).build(); } @GET @Path("/stop") - public Response cassStop(@DefaultValue("false") @QueryParam("force") boolean force) throws IOException, InterruptedException, JSONException { + public Response cassStop(@DefaultValue("false") @QueryParam("force") boolean force) + throws IOException { cassProcess.stop(force); return Response.ok(REST_SUCCESS, MediaType.APPLICATION_JSON).build(); } @GET @Path("/refresh") - public Response cassRefresh(@QueryParam(REST_HEADER_KEYSPACES) String keyspaces) throws IOException, ExecutionException, InterruptedException, JSONException { + public Response cassRefresh(@QueryParam(REST_HEADER_KEYSPACES) String keyspaces) + throws IOException, ExecutionException, InterruptedException { logger.debug("node tool refresh is being called"); if (StringUtils.isBlank(keyspaces)) return Response.status(400).entity("Missing keyspace in request").build(); @@ -91,9 +97,7 @@ public Response cassRefresh(@QueryParam(REST_HEADER_KEYSPACES) String keyspaces) try { nodeTool = JMXNodeTool.instance(config); } catch (JMXConnectionException e) { - logger.error("Exception in fetching c* jmx tool . Msgl: {}", e.getLocalizedMessage(), e); - return Response.status(503).entity("JMXConnectionException") - .build(); + return Response.status(503).entity("JMXConnectionException").build(); } nodeTool.refresh(Lists.newArrayList(keyspaces.split(","))); return Response.ok(REST_SUCCESS, MediaType.APPLICATION_JSON).build(); @@ -101,14 +105,12 @@ public Response cassRefresh(@QueryParam(REST_HEADER_KEYSPACES) String keyspaces) @GET @Path("/info") - public Response cassInfo() throws IOException, InterruptedException, JSONException { + public Response cassInfo() throws JSONException { JMXNodeTool nodeTool; try { nodeTool = JMXNodeTool.instance(config); } catch (JMXConnectionException e) { - logger.error("Exception in fetching c* jmx tool . 
Msgl: {}", e.getLocalizedMessage(), e); - return Response.status(503).entity("JMXConnectionException") - .build(); + return Response.status(503).entity("JMXConnectionException").build(); } logger.debug("node tool info being called"); return Response.ok(nodeTool.info(), MediaType.APPLICATION_JSON).build(); @@ -116,37 +118,46 @@ public Response cassInfo() throws IOException, InterruptedException, JSONExcepti @GET @Path("/partitioner") - public Response cassPartitioner() throws IOException, InterruptedException, JSONException { + public Response cassPartitioner() { JMXNodeTool nodeTool; try { nodeTool = JMXNodeTool.instance(config); } catch (JMXConnectionException e) { - logger.error("Exception in fetching c* jmx tool . Msgl: {}", e.getLocalizedMessage(), e); - return Response.status(503).entity("JMXConnectionException") - .build(); + return Response.status(503).entity("JMXConnectionException").build(); } logger.debug("node tool getPartitioner being called"); - return Response.ok(nodeTool.getPartitioner(), MediaType.APPLICATION_JSON).build(); + return Response.ok(nodeTool.getPartitioner(), MediaType.TEXT_PLAIN).build(); } @GET @Path("/ring/{id}") - public Response cassRing(@PathParam("id") String keyspace) throws IOException, InterruptedException, JSONException { + public Response cassRing(@PathParam("id") String keyspace) throws JSONException { JMXNodeTool nodeTool; try { nodeTool = JMXNodeTool.instance(config); } catch (JMXConnectionException e) { - logger.error("Exception in fetching c* jmx tool . Msgl: {}", e.getLocalizedMessage(), e); - return Response.status(503).entity("JMXConnectionException") - .build(); + return Response.status(503).entity("JMXConnectionException").build(); } logger.debug("node tool ring being called"); return Response.ok(nodeTool.ring(keyspace), MediaType.APPLICATION_JSON).build(); } + @GET + @Path("/status") + public Response statusInfo() throws JSONException { + JMXNodeTool nodeTool; + try { + nodeTool = JMXNodeTool.instance(config); + } catch (JMXConnectionException e) { + return Response.status(503).entity("JMXConnectionException").build(); + } + logger.debug("node tool status being called"); + return Response.ok(nodeTool.statusInformation(), MediaType.APPLICATION_JSON).build(); + } + @GET @Path("/flush") - public Response cassFlush() throws IOException, InterruptedException, ExecutionException { + public Response cassFlush() { JSONObject rootObj = new JSONObject(); try { @@ -155,36 +166,32 @@ public Response cassFlush() throws IOException, InterruptedException, ExecutionE return Response.ok().entity(rootObj).build(); } catch (Exception e) { try { - rootObj.put("status", "ERRROR"); + rootObj.put("status", "ERROR"); rootObj.put("desc", e.getLocalizedMessage()); } catch (Exception e1) { - return Response.status(503).entity("FlushError") - .build(); + return Response.status(503).entity("FlushError").build(); } - return Response.status(503).entity(rootObj) - .build(); + return Response.status(503).entity(rootObj).build(); } } @GET @Path("/compact") - public Response cassCompact() throws IOException, ExecutionException, InterruptedException { + public Response cassCompact() { JSONObject rootObj = new JSONObject(); try { compaction.execute(); - rootObj.put("Compcated", true); + rootObj.put("Compacted", true); return Response.ok().entity(rootObj).build(); } catch (Exception e) { try { - rootObj.put("status", "ERRROR"); + rootObj.put("status", "ERROR"); rootObj.put("desc", e.getLocalizedMessage()); } catch (Exception e1) { - return 
Response.status(503).entity("CompactionError") - .build(); + return Response.status(503).entity("CompactionError").build(); } - return Response.status(503).entity(rootObj) - .build(); + return Response.status(503).entity(rootObj).build(); } } @@ -195,9 +202,7 @@ public Response cassCleanup() throws IOException, ExecutionException, Interrupte try { nodeTool = JMXNodeTool.instance(config); } catch (JMXConnectionException e) { - logger.error("Exception in fetching c* jmx tool . Msgl: {}", e.getLocalizedMessage(), e); - return Response.status(503).entity("JMXConnectionException") - .build(); + return Response.status(503).entity("JMXConnectionException").build(); } logger.debug("node tool cleanup being called"); nodeTool.cleanup(); @@ -206,14 +211,16 @@ public Response cassCleanup() throws IOException, ExecutionException, Interrupte @GET @Path("/repair") - public Response cassRepair(@QueryParam("sequential") boolean isSequential, @QueryParam("localDC") boolean localDCOnly, @DefaultValue("false") @QueryParam("primaryRange") boolean primaryRange) throws IOException, ExecutionException, InterruptedException { + public Response cassRepair( + @QueryParam("sequential") boolean isSequential, + @QueryParam("localDC") boolean localDCOnly, + @DefaultValue("false") @QueryParam("primaryRange") boolean primaryRange) + throws IOException, ExecutionException, InterruptedException { JMXNodeTool nodeTool; try { nodeTool = JMXNodeTool.instance(config); } catch (JMXConnectionException e) { - logger.error("Exception in fetching c* jmx tool . Msgl: {}", e.getLocalizedMessage(), e); - return Response.status(503).entity("JMXConnectionException") - .build(); + return Response.status(503).entity("JMXConnectionException").build(); } logger.debug("node tool repair being called"); nodeTool.repair(isSequential, localDCOnly, primaryRange); @@ -222,28 +229,27 @@ public Response cassRepair(@QueryParam("sequential") boolean isSequential, @Quer @GET @Path("/version") - public Response version() throws IOException, ExecutionException, InterruptedException { + public Response version() { JMXNodeTool nodeTool; try { nodeTool = JMXNodeTool.instance(config); } catch (JMXConnectionException e) { - logger.error("Exception in fetching c* jmx tool . Msgl: {}", e.getLocalizedMessage(), e); - return Response.status(503).entity("JMXConnectionException") - .build(); + return Response.status(503).entity("JMXConnectionException").build(); } - return Response.ok(new JSONArray().put(nodeTool.getReleaseVersion()), MediaType.APPLICATION_JSON).build(); + return Response.ok( + new JSONArray().put(nodeTool.getReleaseVersion()), + MediaType.APPLICATION_JSON) + .build(); } @GET @Path("/disablegossip") - public Response disablegossip() throws IOException, ExecutionException, InterruptedException { + public Response disablegossip() { JMXNodeTool nodeTool; try { nodeTool = JMXNodeTool.instance(config); } catch (JMXConnectionException e) { - logger.error("Exception in fetching c* jmx tool . 
Msgl: {}", e.getLocalizedMessage(), e); - return Response.status(503).entity("JMXConnectionException") - .build(); + return Response.status(503).entity("JMXConnectionException").build(); } nodeTool.stopGossiping(); return Response.ok(REST_SUCCESS, MediaType.APPLICATION_JSON).build(); @@ -251,128 +257,37 @@ public Response disablegossip() throws IOException, ExecutionException, Interrup @GET @Path("/enablegossip") - public Response enablegossip() throws IOException, ExecutionException, InterruptedException { + public Response enablegossip() { JMXNodeTool nodeTool; try { nodeTool = JMXNodeTool.instance(config); } catch (JMXConnectionException e) { - logger.error("Exception in fetching c* jmx tool . Msgl: {}", e.getLocalizedMessage(), e); - return Response.status(503).entity("JMXConnectionException") - .build(); + return Response.status(503).entity("JMXConnectionException").build(); } nodeTool.startGossiping(); return Response.ok(REST_SUCCESS, MediaType.APPLICATION_JSON).build(); } - @GET - @Path("/disablethrift") - public Response disablethrift() throws IOException, ExecutionException, InterruptedException { - JMXNodeTool nodeTool; - try { - nodeTool = JMXNodeTool.instance(config); - } catch (JMXConnectionException e) { - logger.error("Exception in fetching c* jmx tool . Msgl: {}", e.getLocalizedMessage(), e); - return Response.status(503).entity("JMXConnectionException") - .build(); - } - nodeTool.stopThriftServer(); - return Response.ok(REST_SUCCESS, MediaType.APPLICATION_JSON).build(); - } - - @GET - @Path("/enablethrift") - public Response enablethrift() throws IOException, ExecutionException, InterruptedException { - JMXNodeTool nodeTool; - try { - nodeTool = JMXNodeTool.instance(config); - } catch (JMXConnectionException e) { - logger.error("Exception in fetching c* jmx tool . Msgl: {}", e.getLocalizedMessage(), e); - return Response.status(503).entity("JMXConnectionException") - .build(); - } - nodeTool.startThriftServer(); - return Response.ok(REST_SUCCESS, MediaType.APPLICATION_JSON).build(); - } - - @GET - @Path("/statusthrift") - public Response statusthrift() throws IOException, ExecutionException, InterruptedException, JSONException { - JMXNodeTool nodeTool; - try { - nodeTool = JMXNodeTool.instance(config); - } catch (JMXConnectionException e) { - logger.error("Exception in fetching c* jmx tool . Msgl: {}", e.getLocalizedMessage(), e); - return Response.status(503).entity("JMXConnectionException") - .build(); - } - return Response.ok(new JSONObject().put("status", (nodeTool.isThriftServerRunning() ? "running" : "not running")), MediaType.APPLICATION_JSON).build(); - } - @GET @Path("/gossipinfo") - public Response gossipinfo() throws IOException, ExecutionException, InterruptedException, JSONException { - JMXNodeTool nodeTool; - try { - nodeTool = JMXNodeTool.instance(config); - } catch (JMXConnectionException e) { - logger.error("Exception in fetching c* jmx tool . 
Msgl: {}", e.getLocalizedMessage(), e); - return Response.status(503).entity("JMXConnectionException") - .build(); - } - JSONObject rootObj = parseGossipInfo(nodeTool.getGossipInfo()); - return Response.ok(rootObj, MediaType.APPLICATION_JSON).build(); + public Response gossipinfo() throws Exception { + List<Map<String, String>> parsedInfo = cassandraOperations.gossipInfo(); + return Response.ok(parsedInfo, MediaType.APPLICATION_JSON).build(); } - - // helper method for parsing, to be tested easily - private static JSONObject parseGossipInfo(String gossipinfo) throws JSONException { - String[] ginfo = gossipinfo.split("\n"); - JSONObject rootObj = new JSONObject(); - JSONObject obj = new JSONObject(); - String key = ""; - for (String line : ginfo) { - if (line.matches("^.*/.*$")) { - String[] data = line.split("/"); - if (StringUtils.isNotBlank(key)) { - rootObj.put(key, obj); - obj = new JSONObject(); - } - key = data[1]; - } else if (line.matches("^ .*:.*$")) { - String[] kv = line.split(":"); - kv[0] = kv[0].trim(); - if (kv[0].equals("STATUS")) { - obj.put(kv[0], kv[1]); - String[] vv = kv[1].split(","); - obj.put("Token", vv[1]); - } else { - obj.put(kv[0], kv[1]); - } - } - } - if (StringUtils.isNotBlank(key)) - rootObj.put(key, obj); - return rootObj; - } - - @GET @Path("/move") - public Response moveToken(@QueryParam(REST_HEADER_TOKEN) String newToken) throws IOException, ExecutionException, InterruptedException, ConfigurationException { + public Response moveToken(@QueryParam(REST_HEADER_TOKEN) String newToken) throws IOException { JMXNodeTool nodeTool; try { nodeTool = JMXNodeTool.instance(config); } catch (JMXConnectionException e) { - logger.error("Exception in fetching c* jmx tool . 
Msgl: {}", e.getLocalizedMessage(), e); - return Response.status(503).entity("JMXConnectionException") - .build(); + return Response.status(503).entity("JMXConnectionException").build(); } logger.debug("node tool drain being called"); nodeTool.drain(); @@ -396,7 +309,8 @@ public Response cassDrain() throws IOException, ExecutionException, InterruptedE */ @GET @Path("/decompress") - public Response decompress(@QueryParam("in") String in, @QueryParam("out") String out) throws Exception { + public Response decompress(@QueryParam("in") String in, @QueryParam("out") String out) + throws Exception { SnappyCompression compress = new SnappyCompression(); compress.decompressAndClose(new FileInputStream(in), new FileOutputStream(out)); JSONObject object = new JSONObject(); @@ -404,5 +318,4 @@ public Response decompress(@QueryParam("in") String in, @QueryParam("out") Strin object.put("Output decompress file", out); return Response.ok(object.toString(), MediaType.APPLICATION_JSON).build(); } - } diff --git a/priam/src/main/java/com/netflix/priam/resources/CassandraConfig.java b/priam/src/main/java/com/netflix/priam/resources/CassandraConfig.java index 13e076107..aacb0fe21 100644 --- a/priam/src/main/java/com/netflix/priam/resources/CassandraConfig.java +++ b/priam/src/main/java/com/netflix/priam/resources/CassandraConfig.java @@ -19,45 +19,51 @@ import com.google.inject.Inject; import com.netflix.priam.PriamServer; import com.netflix.priam.identity.DoubleRing; -import org.apache.commons.lang3.StringUtils; -import org.json.simple.JSONValue; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - +import com.netflix.priam.merics.CassMonitorMetrics; +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; import javax.ws.rs.GET; +import javax.ws.rs.POST; import javax.ws.rs.Path; import javax.ws.rs.Produces; +import javax.ws.rs.QueryParam; import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; -import java.io.IOException; -import java.util.HashMap; -import java.util.List; -import java.util.Map; +import javax.ws.rs.core.Response.Status; +import org.apache.commons.lang3.StringUtils; +import org.json.simple.JSONValue; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * This servlet will provide the configuration API service as and when Cassandra - * requests for it. + * This servlet will provide the configuration API service as and when Cassandra requests for it. 
*/ @Path("/v1/cassconfig") @Produces(MediaType.TEXT_PLAIN) public class CassandraConfig { private static final Logger logger = LoggerFactory.getLogger(CassandraConfig.class); - private PriamServer priamServer; - private DoubleRing doubleRing; + private final PriamServer priamServer; + private final DoubleRing doubleRing; + private final CassMonitorMetrics metrics; @Inject - public CassandraConfig(PriamServer server, DoubleRing doubleRing) { + public CassandraConfig(PriamServer server, DoubleRing doubleRing, CassMonitorMetrics metrics) { this.priamServer = server; this.doubleRing = doubleRing; + this.metrics = metrics; } @GET @Path("/get_seeds") public Response getSeeds() { try { - final List<String> seeds = priamServer.getId().getSeeds(); - if (!seeds.isEmpty()) + final List<String> seeds = priamServer.getInstanceIdentity().getSeeds(); + if (!seeds.isEmpty()) { + metrics.incGetSeeds(); return Response.ok(StringUtils.join(seeds, ',')).build(); + } logger.error("Cannot find the Seeds"); } catch (Exception e) { logger.error("Error while executing get_seeds", e); @@ -70,10 +76,12 @@ public Response getSeeds() { @Path("/get_token") public Response getToken() { try { - String token = priamServer.getId().getInstance().getToken(); + String token = priamServer.getInstanceIdentity().getInstance().getToken(); if (StringUtils.isNotBlank(token)) { logger.info("Returning token value \"{}\" for this instance to caller.", token); - return Response.ok(priamServer.getId().getInstance().getToken()).build(); + metrics.incGetToken(); + return Response.ok(priamServer.getInstanceIdentity().getInstance().getToken()) + .build(); } logger.error("Cannot find token for this instance."); @@ -89,7 +97,8 @@ public Response getToken() { @Path("/is_replace_token") public Response isReplaceToken() { try { - return Response.ok(String.valueOf(priamServer.getId().isReplace())).build(); + return Response.ok(String.valueOf(priamServer.getInstanceIdentity().isReplace())) + .build(); } catch (Exception e) { // TODO: can this ever happen? if so, what conditions would cause an exception here? 
logger.error("Error while executing is_replace_token", e); @@ -97,18 +106,31 @@ public Response isReplaceToken() { } } - @GET @Path("/get_replaced_ip") public Response getReplacedIp() { try { - return Response.ok(String.valueOf(priamServer.getId().getReplacedIp())).build(); + metrics.incGetReplacedIp(); + return Response.ok(String.valueOf(priamServer.getInstanceIdentity().getReplacedIp())) + .build(); } catch (Exception e) { logger.error("Error while executing get_replaced_ip", e); return Response.serverError().build(); } } + @POST + @Path("/set_replaced_ip") + public Response setReplacedIp(@QueryParam("ip") String ip) { + if (StringUtils.isEmpty(ip)) return Response.status(Status.BAD_REQUEST).build(); + try { + priamServer.getInstanceIdentity().setReplacedIp(ip); + return Response.ok().build(); + } catch (Exception e) { + logger.error("Error while overriding replacement ip", e); + return Response.serverError().build(); + } + } @GET @Path("/get_extra_env_params") @@ -117,7 +139,7 @@ public Response getExtraEnvParams() { Map returnMap; returnMap = priamServer.getConfiguration().getExtraEnvParams(); if (returnMap == null) { - returnMap = new HashMap(); + returnMap = new HashMap<>(); } String extraEnvParamsJson = JSONValue.toJSONString(returnMap); return Response.ok(extraEnvParamsJson).build(); @@ -127,11 +149,11 @@ public Response getExtraEnvParams() { } } - @GET @Path("/double_ring") public Response doubleRing() throws IOException, ClassNotFoundException { try { + metrics.incDoubleRing(); doubleRing.backup(); doubleRing.doubleSlots(); } catch (Throwable th) { @@ -142,4 +164,4 @@ public Response doubleRing() throws IOException, ClassNotFoundException { } return Response.status(200).build(); } -} \ No newline at end of file +} diff --git a/priam/src/main/java/com/netflix/priam/resources/PriamConfig.java b/priam/src/main/java/com/netflix/priam/resources/PriamConfig.java new file mode 100644 index 000000000..f6118b71f --- /dev/null +++ b/priam/src/main/java/com/netflix/priam/resources/PriamConfig.java @@ -0,0 +1,109 @@ +/* + * Copyright 2018 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ +package com.netflix.priam.resources; + +import com.google.inject.Inject; +import com.netflix.priam.PriamServer; +import com.netflix.priam.utils.GsonJsonSerializer; +import java.util.HashMap; +import java.util.Map; +import javax.ws.rs.GET; +import javax.ws.rs.Path; +import javax.ws.rs.PathParam; +import javax.ws.rs.Produces; +import javax.ws.rs.QueryParam; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.Response; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * This servlet will provide the configuration API service for use by external scripts and tooling + */ +@Path("/v1/config") +@Produces(MediaType.APPLICATION_JSON) +public class PriamConfig { + private static final Logger logger = LoggerFactory.getLogger(PriamConfig.class); + private final PriamServer priamServer; + + @Inject + public PriamConfig(PriamServer server) { + this.priamServer = server; + } + + private Response doGetPriamConfig(String group, String name) { + try { + final Map result = new HashMap<>(); + final Map value = + priamServer.getConfiguration().getStructuredConfiguration(group); + if (name != null && value.containsKey(name)) { + result.put(name, value.get(name)); + return Response.ok(GsonJsonSerializer.getGson().toJson(result)).build(); + } else if (name != null) { + result.put("message", String.format("No such structured config: [%s]", name)); + logger.error(String.format("No such structured config: [%s]", name)); + return Response.status(404) + .entity(GsonJsonSerializer.getGson().toJson(result)) + .type(MediaType.APPLICATION_JSON) + .build(); + } else { + result.putAll(value); + return Response.ok(GsonJsonSerializer.getGson().toJson(result)).build(); + } + } catch (Exception e) { + logger.error("Error while executing getPriamConfig", e); + return Response.serverError().build(); + } + } + + @GET + @Path("/structured/{group}") + public Response getPriamConfig(@PathParam("group") String group) { + return doGetPriamConfig(group, null); + } + + @GET + @Path("/structured/{group}/{name}") + public Response getPriamConfigByName( + @PathParam("group") String group, @PathParam("name") String name) { + return doGetPriamConfig(group, name); + } + + @GET + @Path("/unstructured/{name}") + public Response getProperty( + @PathParam("name") String name, @QueryParam("default") String defaultValue) { + Map result = new HashMap<>(); + try { + String value = priamServer.getConfiguration().getProperty(name, defaultValue); + if (value != null) { + result.put(name, value); + return Response.ok(GsonJsonSerializer.getGson().toJson(result)).build(); + } else { + result.put("message", String.format("No such property: [%s]", name)); + logger.error(String.format("No such property: [%s]", name)); + return Response.status(404) + .entity(GsonJsonSerializer.getGson().toJson(result)) + .type(MediaType.APPLICATION_JSON) + .build(); + } + } catch (Exception e) { + logger.error("Error while executing getPriamConfig", e); + return Response.serverError().build(); + } + } +} diff --git a/priam/src/main/java/com/netflix/priam/resources/PriamInstanceResource.java b/priam/src/main/java/com/netflix/priam/resources/PriamInstanceResource.java index 3085e697e..7c8eefc23 100644 --- a/priam/src/main/java/com/netflix/priam/resources/PriamInstanceResource.java +++ b/priam/src/main/java/com/netflix/priam/resources/PriamInstanceResource.java @@ -20,47 +20,47 @@ import com.netflix.priam.config.IConfiguration; import com.netflix.priam.identity.IPriamInstanceFactory; import com.netflix.priam.identity.PriamInstance; -import 
org.slf4j.Logger; -import org.slf4j.LoggerFactory; - +import com.netflix.priam.identity.config.InstanceInfo; +import java.net.URI; +import java.util.stream.Collectors; import javax.ws.rs.*; import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; import javax.ws.rs.core.UriBuilder; -import java.net.URI; -import java.util.List; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -/** - * Resource for manipulating priam instances. - */ +/** Resource for manipulating priam instances. */ @Path("/v1/instances") @Produces(MediaType.TEXT_PLAIN) public class PriamInstanceResource { private static final Logger log = LoggerFactory.getLogger(PriamInstanceResource.class); private final IConfiguration config; - private final IPriamInstanceFactory factory; + private final IPriamInstanceFactory factory; + private final InstanceInfo instanceInfo; @Inject - //Note: do not parameterized the generic type variable to an implementation as it confuses Guice in the binding. - public PriamInstanceResource(IConfiguration config, IPriamInstanceFactory factory) { + // Note: do not parameterize the generic type variable to an implementation as it confuses + // Guice in the binding. + public PriamInstanceResource( + IConfiguration config, IPriamInstanceFactory factory, InstanceInfo instanceInfo) { this.config = config; this.factory = factory; + this.instanceInfo = instanceInfo; } /** * Get the list of all priam instances + * * @return the list of all priam instances */ @GET public String getInstances() { - StringBuilder response = new StringBuilder(); - List allInstances = factory.getAllIds(config.getAppName()); - for (PriamInstance node : allInstances) { - response.append(node.toString()); - response.append("\n"); - } - return response.toString(); + return factory.getAllIds(config.getAppName()) + .stream() + .map(PriamInstance::toString) + .collect(Collectors.joining("\n", "", "\n")); } /** @@ -84,12 +84,23 @@ public String getInstance(@PathParam("id") int id) { */ @POST public Response createInstance( - @QueryParam("id") int id, @QueryParam("instanceID") String instanceID, - @QueryParam("hostname") String hostname, @QueryParam("ip") String ip, - @QueryParam("rack") String rack, @QueryParam("token") String token) { - log.info("Creating instance [id={}, instanceId={}, hostname={}, ip={}, rack={}, token={}", - id, instanceID, hostname, ip, rack, token); - PriamInstance instance = factory.create(config.getAppName(), id, instanceID, hostname, ip, rack, null, token); + @QueryParam("id") int id, + @QueryParam("instanceID") String instanceID, + @QueryParam("hostname") String hostname, + @QueryParam("ip") String ip, + @QueryParam("rack") String rack, + @QueryParam("token") String token) { + log.info( + "Creating instance [id={}, instanceId={}, hostname={}, ip={}, rack={}, token={}", + id, + instanceID, + hostname, + ip, + rack, + token); + PriamInstance instance = + factory.create( + config.getAppName(), id, instanceID, hostname, ip, rack, null, token); URI uri = UriBuilder.fromPath("/{id}").build(instance.getId()); return Response.created(uri).build(); } @@ -109,14 +120,15 @@ public Response deleteInstance(@PathParam("id") int id) { } /** - * Returns the PriamInstance with the given {@code id}, or - * throws a WebApplicationException(400) if none found. + * Returns the PriamInstance with the given {@code id}, or throws a WebApplicationException(400) + * if none found. 
* * @param id the node id * @return PriamInstance with the given {@code id} */ private PriamInstance getByIdIfFound(int id) { - PriamInstance instance = factory.getInstance(config.getAppName(), config.getDC(), id); + PriamInstance instance = + factory.getInstance(config.getAppName(), instanceInfo.getRegion(), id); if (instance == null) { throw notFound(String.format("No priam instance with id %s found", id)); } @@ -124,6 +136,7 @@ private PriamInstance getByIdIfFound(int id) { } private static WebApplicationException notFound(String message) { - return new WebApplicationException(Response.status(Response.Status.NOT_FOUND).entity(message).build()); + return new WebApplicationException( + Response.status(Response.Status.NOT_FOUND).entity(message).build()); } } diff --git a/priam/src/main/java/com/netflix/priam/resources/RestoreServlet.java b/priam/src/main/java/com/netflix/priam/resources/RestoreServlet.java index b3ff5ab7f..9451eb33e 100644 --- a/priam/src/main/java/com/netflix/priam/resources/RestoreServlet.java +++ b/priam/src/main/java/com/netflix/priam/resources/RestoreServlet.java @@ -1,91 +1,55 @@ /** * Copyright 2017 Netflix, Inc. - *
<p>
- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
<p>
- * http://www.apache.org/licenses/LICENSE-2.0 - *
<p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and + * + *
<p>
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + *
<p>
http://www.apache.org/licenses/LICENSE-2.0 + * + *
<p>
Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.priam.resources; -import com.google.common.collect.Lists; import com.google.inject.Inject; -import com.google.inject.Provider; -import com.netflix.priam.defaultimpl.ICassandraProcess; -import com.netflix.priam.config.IConfiguration; -import com.netflix.priam.PriamServer; -import com.netflix.priam.backup.AbstractBackupPath; import com.netflix.priam.health.InstanceState; -import com.netflix.priam.identity.IPriamInstanceFactory; -import com.netflix.priam.identity.PriamInstance; import com.netflix.priam.restore.Restore; -import com.netflix.priam.tuner.ICassandraTuner; -import com.netflix.priam.utils.ITokenManager; -import org.apache.commons.lang3.StringUtils; -import org.joda.time.DateTime; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - +import com.netflix.priam.utils.DateUtil; import javax.ws.rs.GET; import javax.ws.rs.Path; import javax.ws.rs.Produces; import javax.ws.rs.QueryParam; import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; -import java.math.BigInteger; -import java.util.Date; -import java.util.List; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; @Path("/v1") @Produces(MediaType.APPLICATION_JSON) public class RestoreServlet { private static final Logger logger = LoggerFactory.getLogger(RestoreServlet.class); - private static final String REST_HEADER_RANGE = "daterange"; - private static final String REST_HEADER_REGION = "region"; - private static final String REST_HEADER_TOKEN = "token"; - private static final String REST_KEYSPACES = "keyspaces"; - private static final String REST_RESTORE_PREFIX = "restoreprefix"; - private static final String REST_SUCCESS = "[\"ok\"]"; - - private IConfiguration config; - private Restore restoreObj; - private Provider pathProvider; - private PriamServer priamServer; - private IPriamInstanceFactory factory; - private ICassandraTuner tuner; - private ICassandraProcess cassProcess; - private ITokenManager tokenManager; - private InstanceState instanceState; + private final Restore restoreObj; + private final InstanceState instanceState; @Inject - public RestoreServlet(IConfiguration config, Restore restoreObj, Provider pathProvider, PriamServer priamServer - , IPriamInstanceFactory factory, ICassandraTuner tuner, ICassandraProcess cassProcess, ITokenManager tokenManager, InstanceState instanceState) { - this.config = config; + public RestoreServlet(Restore restoreObj, InstanceState instanceState) { this.restoreObj = restoreObj; - this.pathProvider = pathProvider; - this.priamServer = priamServer; - this.factory = factory; - this.tuner = tuner; - this.cassProcess = cassProcess; - this.tokenManager = tokenManager; this.instanceState = instanceState; } - /* * @return metadata of current restore. If no restore in progress, returns the metadata of most recent restore attempt. 
- * status:[not_started|running|success|failure] - * daterange:[startdaterange,enddatarange] - * starttime:[yyyymmddhhmm] - * endtime:[yyyymmddmm] + * restoreStatus: { + * startDateRange: "[yyyymmddhhmm]", + * endDateRange: "[yyyymmddhhmm]", + * executionStartTime: "[yyyymmddhhmm]", + * executionEndTime: "[yyyymmddhhmm]", + * snapshotMetaFile: " used for full snapshot", + * status: "STARTED|FINISHED|FAILED" + * } */ @GET @Path("/restore/status") @@ -95,99 +59,13 @@ public Response status() throws Exception { @GET @Path("/restore") - public Response restore(@QueryParam(REST_HEADER_RANGE) String daterange, @QueryParam(REST_HEADER_REGION) String region, @QueryParam(REST_HEADER_TOKEN) String token, - @QueryParam(REST_KEYSPACES) String keyspaces, @QueryParam(REST_RESTORE_PREFIX) String restorePrefix) throws Exception { - Date startTime; - Date endTime; - - if (StringUtils.isBlank(daterange) || daterange.equalsIgnoreCase("default")) { - startTime = new DateTime().minusDays(1).toDate(); - endTime = new DateTime().toDate(); - } else { - String[] restore = daterange.split(","); - AbstractBackupPath path = pathProvider.get(); - startTime = path.parseDate(restore[0]); - endTime = path.parseDate(restore[1]); - } - - String origRestorePrefix = config.getRestorePrefix(); - if (StringUtils.isNotBlank(restorePrefix)) { - config.setRestorePrefix(restorePrefix); - } - - logger.info("Parameters: { token: [{}], region: [{}], startTime: [{}], endTime: [{}], keyspaces: [{}], restorePrefix: [{}]}", - token, region, startTime, endTime, keyspaces, restorePrefix); - - restore(token, region, startTime, endTime, keyspaces); - - //Since this call is probably never called in parallel, config is multi-thread safe to be edited - if (origRestorePrefix != null) - config.setRestorePrefix(origRestorePrefix); - else config.setRestorePrefix(""); - - return Response.ok(REST_SUCCESS, MediaType.APPLICATION_JSON).build(); - } - - /** - * Restore with the specified start and end time. 
- * - * @param token Overrides the current token with this one, if specified - * @param region Override the region for searching backup - * @param startTime Start time - * @param endTime End time upto which the restore should fetch data - * @param keyspaces Comma seperated list of keyspaces to restore - * @throws Exception - */ - private void restore(String token, String region, Date startTime, Date endTime, String keyspaces) throws Exception { - String origRegion = config.getDC(); - String origToken = priamServer.getId().getInstance().getToken(); - if (StringUtils.isNotBlank(token)) - priamServer.getId().getInstance().setToken(token); - - if (config.isRestoreClosestToken()) - priamServer.getId().getInstance().setToken(closestToken(priamServer.getId().getInstance().getToken(), config.getDC())); - - if (StringUtils.isNotBlank(region)) { - config.setDC(region); - logger.info("Restoring from region {}", region); - priamServer.getId().getInstance().setToken(closestToken(priamServer.getId().getInstance().getToken(), region)); - logger.info("Restore will use token {}", priamServer.getId().getInstance().getToken()); - } - - setRestoreKeyspaces(keyspaces); - - try { - restoreObj.restore(startTime, endTime); - } finally { - config.setDC(origRegion); - priamServer.getId().getInstance().setToken(origToken); - } - tuner.updateAutoBootstrap(config.getYamlLocation(), false); - cassProcess.start(true); - } - - /** - * Find closest token in the specified region - */ - private String closestToken(String token, String region) { - List plist = factory.getAllIds(config.getAppName()); - List tokenList = Lists.newArrayList(); - for (PriamInstance ins : plist) { - if (ins.getDC().equalsIgnoreCase(region)) - tokenList.add(new BigInteger(ins.getToken())); - } - return tokenManager.findClosestToken(new BigInteger(token), tokenList).toString(); + public Response restore(@QueryParam("daterange") String daterange) throws Exception { + DateUtil.DateRange dateRange = new DateUtil.DateRange(daterange); + logger.info( + "Parameters: {startTime: [{}], endTime: [{}]}", + dateRange.getStartTime().toString(), + dateRange.getEndTime().toString()); + restoreObj.restore(dateRange); + return Response.ok("[\"ok\"]", MediaType.APPLICATION_JSON).build(); } - - /* - * TODO: decouple the servlet, config, and restorer. this should not rely on a side - * effect of a list mutation on the config object (treating it as global var). - */ - private void setRestoreKeyspaces(String keyspaces) { - if (StringUtils.isNotBlank(keyspaces)) { - List newKeyspaces = Lists.newArrayList(keyspaces.split(",")); - config.setRestoreKeySpaces(newKeyspaces); - } - } - -} \ No newline at end of file +} diff --git a/priam/src/main/java/com/netflix/priam/resources/SecurityGroupAdmin.java b/priam/src/main/java/com/netflix/priam/resources/SecurityGroupAdmin.java index 2dd494115..6b1f10a7b 100644 --- a/priam/src/main/java/com/netflix/priam/resources/SecurityGroupAdmin.java +++ b/priam/src/main/java/com/netflix/priam/resources/SecurityGroupAdmin.java @@ -1,33 +1,30 @@ /** * Copyright 2017 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and + * + *

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + *

http://www.apache.org/licenses/LICENSE-2.0 + * + *

Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.priam.resources; import com.google.inject.Inject; import com.netflix.priam.identity.IMembership; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - +import java.util.Collections; import javax.ws.rs.*; import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; -import java.util.Collections; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * This http endpoint allows direct updates (adding/removing) (CIDR) IP addresses and port - * ranges to the security group for this app. + * This http endpoint allows direct updates (adding/removing) (CIDR) IP addresses and port ranges to + * the security group for this app. */ @Path("/v1/secgroup") @Produces(MediaType.TEXT_PLAIN) @@ -42,9 +39,11 @@ public SecurityGroupAdmin(IMembership membership) { } @POST - public Response addACL(@QueryParam("ip") String ipAddr, @QueryParam("fromPort") int fromPort, @QueryParam("toPort") int toPort) { - if (!ipAddr.endsWith(CIDR_TAG)) - ipAddr += CIDR_TAG; + public Response addACL( + @QueryParam("ip") String ipAddr, + @QueryParam("fromPort") int fromPort, + @QueryParam("toPort") int toPort) { + if (!ipAddr.endsWith(CIDR_TAG)) ipAddr += CIDR_TAG; try { membership.addACL(Collections.singletonList(ipAddr), fromPort, toPort); } catch (Exception e) { @@ -55,9 +54,11 @@ public Response addACL(@QueryParam("ip") String ipAddr, @QueryParam("fromPort") } @DELETE - public Response removeACL(@QueryParam("ip") String ipAddr, @QueryParam("fromPort") int fromPort, @QueryParam("toPort") int toPort) { - if (!ipAddr.endsWith(CIDR_TAG)) - ipAddr += CIDR_TAG; + public Response removeACL( + @QueryParam("ip") String ipAddr, + @QueryParam("fromPort") int fromPort, + @QueryParam("toPort") int toPort) { + if (!ipAddr.endsWith(CIDR_TAG)) ipAddr += CIDR_TAG; try { membership.removeACL(Collections.singletonList(ipAddr), fromPort, toPort); } catch (Exception e) { @@ -67,4 +68,3 @@ public Response removeACL(@QueryParam("ip") String ipAddr, @QueryParam("fromPort return Response.ok().build(); } } - diff --git a/priam/src/main/java/com/netflix/priam/restore/AbstractRestore.java b/priam/src/main/java/com/netflix/priam/restore/AbstractRestore.java index c86950d2e..6b8c865c6 100644 --- a/priam/src/main/java/com/netflix/priam/restore/AbstractRestore.java +++ b/priam/src/main/java/com/netflix/priam/restore/AbstractRestore.java @@ -16,176 +16,170 @@ */ package com.netflix.priam.restore; -import com.google.common.collect.Iterators; -import com.google.common.collect.Lists; +import com.google.inject.Inject; import com.google.inject.Provider; -import com.netflix.priam.defaultimpl.ICassandraProcess; -import com.netflix.priam.config.IConfiguration; import com.netflix.priam.backup.*; import com.netflix.priam.backup.AbstractBackupPath.BackupFileType; +import com.netflix.priam.backupv2.IMetaProxy; +import com.netflix.priam.config.IBackupRestoreConfig; +import com.netflix.priam.config.IConfiguration; +import com.netflix.priam.defaultimpl.ICassandraProcess; import com.netflix.priam.health.InstanceState; import com.netflix.priam.identity.InstanceIdentity; +import com.netflix.priam.identity.config.InstanceInfo; import com.netflix.priam.scheduler.Task; import com.netflix.priam.utils.*; -import 
org.apache.commons.collections4.CollectionUtils; -import org.apache.commons.lang3.StringUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.io.File; import java.io.IOException; import java.math.BigInteger; +import java.nio.file.Path; import java.time.LocalDateTime; +import java.time.ZoneId; import java.util.*; +import java.util.concurrent.Future; +import javax.inject.Named; +import org.apache.commons.collections4.CollectionUtils; +import org.apache.commons.io.FileUtils; +import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * A means to perform a restore. This class contains the following characteristics: - * - It is agnostic to the source type of the restore, this is determine by the injected IBackupFileSystem. - * - This class can be scheduled, i.e. it is a "Task". - * - When this class is executed, it uses its own thread pool to execute the restores. + * A means to perform a restore. This class contains the following characteristics: - It is agnostic + * to the source type of the restore, this is determine by the injected IBackupFileSystem. - This + * class can be scheduled, i.e. it is a "Task". - When this class is executed, it uses its own + * thread pool to execute the restores. */ -public abstract class AbstractRestore extends Task implements IRestoreStrategy{ - // keeps track of the last few download which was executed. - // TODO fix the magic number of 1000 => the idea of 80% of 1000 files limit per s3 query - static final FifoQueue tracker = new FifoQueue(800); +public abstract class AbstractRestore extends Task implements IRestoreStrategy { private static final Logger logger = LoggerFactory.getLogger(AbstractRestore.class); private static final String JOBNAME = "AbstractRestore"; private static final String SYSTEM_KEYSPACE = "system"; private static BigInteger restoreToken; final IBackupFileSystem fs; final Sleeper sleeper; - private BackupRestoreUtil backupRestoreUtil; - private Provider pathProvider; - private InstanceIdentity id; - private RestoreTokenSelector tokenSelector; - private ICassandraProcess cassProcess; - private InstanceState instanceState; - private MetaData metaData; - private IPostRestoreHook postRestoreHook; - - AbstractRestore(IConfiguration config, IBackupFileSystem fs, String name, Sleeper sleeper, - Provider pathProvider, - InstanceIdentity instanceIdentity, RestoreTokenSelector tokenSelector, - ICassandraProcess cassProcess, MetaData metaData, InstanceState instanceState, IPostRestoreHook postRestoreHook) { + private final BackupRestoreUtil backupRestoreUtil; + private final Provider pathProvider; + private final InstanceIdentity instanceIdentity; + private final RestoreTokenSelector tokenSelector; + private final ICassandraProcess cassProcess; + private final InstanceState instanceState; + private final MetaData metaData; + private final IPostRestoreHook postRestoreHook; + + @Inject + @Named("v1") + IMetaProxy metaV1Proxy; + + @Inject + @Named("v2") + IMetaProxy metaV2Proxy; + + @Inject IBackupRestoreConfig backupRestoreConfig; + + public AbstractRestore( + IConfiguration config, + IBackupFileSystem fs, + String name, + Sleeper sleeper, + Provider pathProvider, + InstanceIdentity instanceIdentity, + RestoreTokenSelector tokenSelector, + ICassandraProcess cassProcess, + MetaData metaData, + InstanceState instanceState, + IPostRestoreHook postRestoreHook) { super(config); this.fs = fs; this.sleeper = sleeper; this.pathProvider = pathProvider; - this.id = instanceIdentity; 
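Reviewer note: the constructor in this hunk now builds `BackupRestoreUtil` from include/exclude column-family lists instead of the old keyspace/CF filter pair. A sketch of the filtering semantics suggested by the `isFiltered(keyspace, columnFamily)` calls later in this file, assuming comma-separated `keyspace.columnfamily` entries; the exact grammar lives in `BackupRestoreUtil`.

```java
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

// Illustrative include/exclude filter; the "ks.cf" list format is an assumption.
final class CfFilterSketch {
    private final Set<String> include = new HashSet<>();
    private final Set<String> exclude = new HashSet<>();

    CfFilterSketch(String includeList, String excludeList) {
        if (includeList != null && !includeList.isEmpty())
            include.addAll(Arrays.asList(includeList.split(",")));
        if (excludeList != null && !excludeList.isEmpty())
            exclude.addAll(Arrays.asList(excludeList.split(",")));
    }

    // true when the restore should skip this keyspace/column family
    boolean isFiltered(String keyspace, String columnFamily) {
        String name = keyspace + "." + columnFamily;
        if (exclude.contains(name)) return true;
        return !include.isEmpty() && !include.contains(name);
    }
}
```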
+ this.instanceIdentity = instanceIdentity; this.tokenSelector = tokenSelector; this.cassProcess = cassProcess; this.metaData = metaData; this.instanceState = instanceState; - backupRestoreUtil = new BackupRestoreUtil(config.getRestoreKeyspaceFilter(), config.getRestoreCFFilter()); + backupRestoreUtil = + new BackupRestoreUtil( + config.getRestoreIncludeCFList(), config.getRestoreExcludeCFList()); this.postRestoreHook = postRestoreHook; } - public static final boolean isRestoreEnabled(IConfiguration conf) { + public static final boolean isRestoreEnabled(IConfiguration conf, InstanceInfo instanceInfo) { boolean isRestoreMode = StringUtils.isNotBlank(conf.getRestoreSnapshot()); - boolean isBackedupRac = (CollectionUtils.isEmpty(conf.getBackupRacs()) || conf.getBackupRacs().contains(conf.getRac())); + boolean isBackedupRac = + (CollectionUtils.isEmpty(conf.getBackupRacs()) + || conf.getBackupRacs().contains(instanceInfo.getRac())); return (isRestoreMode && isBackedupRac); } - private final void download(Iterator fsIterator, BackupFileType bkupFileType) throws Exception { + public void setRestoreConfiguration(String restoreIncludeCFList, String restoreExcludeCFList) { + backupRestoreUtil.setFilters(restoreIncludeCFList, restoreExcludeCFList); + } + + private List> download( + Iterator fsIterator, boolean waitForCompletion) throws Exception { + List> futureList = new ArrayList<>(); while (fsIterator.hasNext()) { AbstractBackupPath temp = fsIterator.next(); - if (temp.getType() == BackupFileType.SST && tracker.contains(temp)) - continue; - - if (backupRestoreUtil.isFiltered(temp.getKeyspace(), temp.getColumnFamily())) { //is filtered? - logger.info("Bypassing restoring file \"{}\" as it is part of the keyspace.columnfamily filter list. Its keyspace:cf is: {}:{}", - temp.newRestoreFile(), temp.getKeyspace(), temp.getColumnFamily()); - continue; - } - - if (config.getRestoreKeySpaces().size() != 0 && (!config.getRestoreKeySpaces().contains(temp.getKeyspace()) || temp.getKeyspace().equals(SYSTEM_KEYSPACE))) { - logger.info("Bypassing restoring file \"{}\" as it is system keyspace", temp.newRestoreFile()); + if (backupRestoreUtil.isFiltered( + temp.getKeyspace(), temp.getColumnFamily())) { // is filtered? + logger.info( + "Bypassing restoring file \"{}\" as it is part of the keyspace.columnfamily filter list. Its keyspace:cf is: {}:{}", + temp.newRestoreFile(), + temp.getKeyspace(), + temp.getColumnFamily()); continue; } - if (temp.getType() == bkupFileType) - { - File localFileHandler = temp.newRestoreFile(); - if (logger.isDebugEnabled()) - logger.debug("Created local file name: " + localFileHandler.getAbsolutePath() + File.pathSeparator + localFileHandler.getName()); - downloadFile(temp, localFileHandler); - } + File localFileHandler = temp.newRestoreFile(); + if (logger.isDebugEnabled()) + logger.debug( + "Created local file name: " + + localFileHandler.getAbsolutePath() + + File.pathSeparator + + localFileHandler.getName()); + futureList.add(downloadFile(temp)); } - //Wait for all download to finish that were started from this method. - waitToComplete(); + // Wait for all download to finish that were started from this method. 
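Reviewer note: `download(...)` now returns the submitted `Future`s instead of blocking internally, which lets `restore(...)` queue snapshot files and commit logs together and join exactly once. The shape of that fork/join pattern, reduced to a self-contained sketch in which `fetch` is a placeholder for the real per-file download:

```java
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

final class ForkJoinSketch {
    // Placeholder for the real remote download of one backup file.
    static Path fetch(Path remote) {
        return remote;
    }

    static void downloadAll(List<Path> remotePaths) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        List<Future<Path>> futures = new ArrayList<>();
        for (Path remote : remotePaths) {
            futures.add(pool.submit(() -> fetch(remote))); // queue everything first
        }
        for (Future<Path> f : futures) {
            f.get(); // join once; the first failed download rethrows here
        }
        pool.shutdown();
    }
}
```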
+ if (waitForCompletion) waitForCompletion(futureList); + + return futureList; + } + + private void waitForCompletion(List> futureList) throws Exception { + for (Future future : futureList) future.get(); } - private final void downloadCommitLogs(Iterator fsIterator, BackupFileType filter, int lastN) throws Exception { - if (fsIterator == null) - return; + private List> downloadCommitLogs( + Iterator fsIterator, int lastN, boolean waitForCompletion) + throws Exception { + if (fsIterator == null) return null; - BoundedList bl = new BoundedList(lastN); + BoundedList bl = new BoundedList(lastN); while (fsIterator.hasNext()) { AbstractBackupPath temp = fsIterator.next(); - if (temp.getType() == BackupFileType.SST && tracker.contains(temp)) - continue; - - if (temp.getType() == filter) { + if (temp.getType() == BackupFileType.CL) { bl.add(temp); } } - download(bl.iterator(), filter); + return download(bl.iterator(), waitForCompletion); } - private void stopCassProcess() throws IOException { - if (config.getRestoreKeySpaces().size() == 0) - cassProcess.stop(true); - } - - private String getRestorePrefix() { - String prefix = ""; - - if (StringUtils.isNotBlank(config.getRestorePrefix())) - prefix = config.getRestorePrefix(); - else - prefix = config.getBackupPrefix(); - - return prefix; - } - - /* - * Fetches meta.json used to store snapshots metadata. - */ - private final void fetchSnapshotMetaFile(String restorePrefix, List out, Date startTime, Date endTime) throws IllegalStateException { - logger.debug("Looking for snapshot meta file within restore prefix: {}", restorePrefix); - - Iterator backupfiles = fs.list(restorePrefix, startTime, endTime); - if (!backupfiles.hasNext()) { - throw new IllegalStateException("meta.json not found, restore prefix: " + restorePrefix); - } - - while (backupfiles.hasNext()) { - AbstractBackupPath path = backupfiles.next(); - if (path.getType() == BackupFileType.META) - //Since there are now meta file for incrementals as well as snapshot, we need to find the correct one (i.e. the snapshot meta file (meta.json)) - if (path.getFileName().equalsIgnoreCase("meta.json")) { - out.add(path); - } - } + cassProcess.stop(true); } @Override public void execute() throws Exception { - if (!isRestoreEnabled(config)) - return; + if (!isRestoreEnabled(config, instanceIdentity.getInstanceInfo())) return; logger.info("Starting restore for {}", config.getRestoreSnapshot()); - String[] restore = config.getRestoreSnapshot().split(","); - AbstractBackupPath path = pathProvider.get(); - final Date startTime = path.parseDate(restore[0]); - final Date endTime = path.parseDate(restore[1]); + final DateUtil.DateRange dateRange = new DateUtil.DateRange(config.getRestoreSnapshot()); new RetryableCallable() { public Void retriableCall() throws Exception { logger.info("Attempting restore"); - restore(startTime, endTime); + restore(dateRange); logger.info("Restore completed"); // Wait for other server init to complete @@ -193,83 +187,90 @@ public Void retriableCall() throws Exception { return null; } }.call(); - } - public void restore(Date startTime, Date endTime) throws Exception { - //fail early if post restore hook has invalid parameters - if(!postRestoreHook.hasValidParameters()) { + public void restore(DateUtil.DateRange dateRange) throws Exception { + // fail early if post restore hook has invalid parameters + if (!postRestoreHook.hasValidParameters()) { throw new PostRestoreHookException("Invalid PostRestoreHook parameters"); } - //Set the restore status. 
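Reviewer note: `downloadCommitLogs` keeps only the newest `lastN` commit logs by funneling the listing through the `BoundedList` defined at the bottom of this class. Its eviction behavior, spelled out as a compilable sketch:

```java
import java.util.LinkedList;

// Mirrors the intent of AbstractRestore.BoundedList: a FIFO that retains only
// the most recent `limit` elements, evicting from the head as new ones arrive.
final class BoundedListSketch<E> extends LinkedList<E> {
    private final int limit;

    BoundedListSketch(int limit) {
        this.limit = limit;
    }

    @Override
    public boolean add(E o) {
        super.add(o);
        while (size() > limit) removeFirst(); // drop the oldest commit logs
        return true;
    }
}
```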
+ Date endTime = new Date(dateRange.getEndTime().toEpochMilli()); + IMetaProxy metaProxy = metaV1Proxy; + if (backupRestoreConfig.enableV2Restore()) metaProxy = metaV2Proxy; + + // Set the restore status. instanceState.getRestoreStatus().resetStatus(); - instanceState.getRestoreStatus().setStartDateRange(DateUtil.convert(startTime)); + instanceState + .getRestoreStatus() + .setStartDateRange( + LocalDateTime.ofInstant(dateRange.getStartTime(), ZoneId.of("UTC"))); instanceState.getRestoreStatus().setEndDateRange(DateUtil.convert(endTime)); instanceState.getRestoreStatus().setExecutionStartTime(LocalDateTime.now()); instanceState.setRestoreStatus(Status.STARTED); - String origToken = id.getInstance().getToken(); + String origToken = instanceIdentity.getInstance().getToken(); try { if (config.isRestoreClosestToken()) { - restoreToken = tokenSelector.getClosestToken(new BigInteger(origToken), startTime); - id.getInstance().setToken(restoreToken.toString()); + restoreToken = + tokenSelector.getClosestToken( + new BigInteger(origToken), + new Date(dateRange.getStartTime().toEpochMilli())); + instanceIdentity.getInstance().setToken(restoreToken.toString()); } - // Stop cassandra if its running and restoring all keyspaces + // Stop cassandra if its running stopCassProcess(); // Cleanup local data - SystemUtils.cleanupDir(config.getDataFileLocation(), config.getRestoreKeySpaces()); + File dataDir = new File(config.getDataFileLocation()); + if (dataDir.exists() && dataDir.isDirectory()) FileUtils.cleanDirectory(dataDir); - // Try and read the Meta file. - List metas = Lists.newArrayList(); - String prefix = getRestorePrefix(); - fetchSnapshotMetaFile(prefix, metas, startTime, endTime); + // Find latest valid meta file. + Optional latestValidMetaFile = + BackupRestoreUtil.getLatestValidMetaPath(metaProxy, dateRange); - if (metas.size() == 0) { - logger.info("[cass_backup] No snapshot meta file found, Restore Failed."); + if (!latestValidMetaFile.isPresent()) { + logger.info("No valid snapshot meta file found, Restore Failed."); instanceState.getRestoreStatus().setExecutionEndTime(LocalDateTime.now()); - instanceState.setRestoreStatus(Status.FINISHED); + instanceState.setRestoreStatus(Status.FAILED); return; } - Collections.sort(metas); - AbstractBackupPath meta = Iterators.getLast(metas.iterator()); - logger.info("Snapshot Meta file for restore {}", meta.getRemotePath()); - instanceState.getRestoreStatus().setSnapshotMetaFile(meta.getRemotePath()); - - //Download the meta.json file. - ArrayList metaFile = new ArrayList<>(); - metaFile.add(meta); - download(metaFile.iterator(), BackupFileType.META); - waitToComplete(); + logger.info( + "Snapshot Meta file for restore {}", latestValidMetaFile.get().getRemotePath()); + instanceState + .getRestoreStatus() + .setSnapshotMetaFile(latestValidMetaFile.get().getRemotePath()); - //Parse meta.json file to find the files required to download from this snapshot. - List snapshots = metaData.toJson(meta.newRestoreFile()); + List allFiles = + BackupRestoreUtil.getAllFiles( + latestValidMetaFile.get(), dateRange, metaProxy, pathProvider); // Download snapshot which is listed in the meta file. - download(snapshots.iterator(), BackupFileType.SNAP); - - logger.info("Downloading incrementals"); - // Download incrementals (SST) after the snapshot meta file. 
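Reviewer note: worth calling out in the hunk above: when no valid snapshot meta file is found, the new code records `Status.FAILED` where the old code recorded `Status.FINISHED`, so a restore that had nothing to restore is no longer reported as a success. The guard pattern, isolated below as a fragment; all identifiers are the patch's own, so this is not standalone code.

```java
// Fail fast and mark FAILED; the removed path reported FINISHED in this case.
Optional<AbstractBackupPath> latestValidMetaFile =
        BackupRestoreUtil.getLatestValidMetaPath(metaProxy, dateRange);
if (!latestValidMetaFile.isPresent()) {
    instanceState.getRestoreStatus().setExecutionEndTime(LocalDateTime.now());
    instanceState.setRestoreStatus(Status.FAILED);
    return;
}
```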
- Iterator incrementals = fs.list(prefix, meta.getTime(), endTime); - download(incrementals, BackupFileType.SST); + List> futureList = new ArrayList<>(); + futureList.addAll(download(allFiles.iterator(), false)); - //Downloading CommitLogs + // Downloading CommitLogs + // Note for Backup V2.0 we do not backup commit logs, as saving them is cost-expensive. if (config.isBackingUpCommitLogs()) { - logger.info("Delete all backuped commitlog files in {}", config.getBackupCommitLogLocation()); + logger.info( + "Delete all backuped commitlog files in {}", + config.getBackupCommitLogLocation()); SystemUtils.cleanupDir(config.getBackupCommitLogLocation(), null); logger.info("Delete all commitlog files in {}", config.getCommitLogLocation()); SystemUtils.cleanupDir(config.getCommitLogLocation(), null); - - Iterator commitLogPathIterator = fs.list(prefix, meta.getTime(), endTime); - downloadCommitLogs(commitLogPathIterator, BackupFileType.CL, config.maxCommitLogsRestore()); + String prefix = fs.getPrefix().toString(); + Iterator commitLogPathIterator = + fs.list(prefix, latestValidMetaFile.get().getTime(), endTime); + futureList.addAll( + downloadCommitLogs( + commitLogPathIterator, config.maxCommitLogsRestore(), false)); } - // Ensure all the files are downloaded. - waitToComplete(); + // Wait for all the futures to finish. + waitForCompletion(futureList); // Given that files are restored now, kick off post restore hook logger.info("Starting post restore hook"); @@ -280,33 +281,30 @@ public void restore(Date startTime, Date endTime) throws Exception { instanceState.getRestoreStatus().setExecutionEndTime(LocalDateTime.now()); instanceState.setRestoreStatus(Status.FINISHED); - //Start cassandra if restore is successful. - if (!config.doesCassandraStartManually()) - cassProcess.start(true); + // Start cassandra if restore is successful. + if (!config.doesCassandraStartManually()) cassProcess.start(true); else - logger.info("config.doesCassandraStartManually() is set to True, hence Cassandra needs to be started manually ..."); + logger.info( + "config.doesCassandraStartManually() is set to True, hence Cassandra needs to be started manually ..."); } catch (Exception e) { instanceState.setRestoreStatus(Status.FAILED); instanceState.getRestoreStatus().setExecutionEndTime(LocalDateTime.now()); logger.error("Error while trying to restore: {}", e.getMessage(), e); throw e; } finally { - id.getInstance().setToken(origToken); + instanceIdentity.getInstance().setToken(origToken); } } /** - * Download file to the location specified. After downloading the file will be decrypted(optionally) and decompressed before saving to final location. - * @param path - path of object to download from source S3/GCS. - * @param restoreLocation - path to the final location of the decompressed and/or decrypted file. - */ - protected abstract void downloadFile(final AbstractBackupPath path, final File restoreLocation) throws Exception; - - /** - * A means to wait until until all threads have completed. It blocks calling thread - * until all tasks are completed. + * Download file to the location specified. After downloading the file will be + * decrypted(optionally) and decompressed before saving to final location. + * + * @param path - path of object to download from source S3/GCS. + * @return Future of the job to track the progress of the job. + * @throws Exception If there is any error in downloading file from the remote file system. 
*/ - protected abstract void waitToComplete(); + protected abstract Future downloadFile(final AbstractBackupPath path) throws Exception; final class BoundedList extends LinkedList { @@ -325,4 +323,8 @@ public boolean add(E o) { return true; } } + + public final int getDownloadTasksQueued() { + return fs.getDownloadTasksQueued(); + } } diff --git a/priam/src/main/java/com/netflix/priam/restore/AwsCrossAccountCryptographyRestoreStrategy.java b/priam/src/main/java/com/netflix/priam/restore/AwsCrossAccountCryptographyRestoreStrategy.java index 8b3366f64..010648b78 100755 --- a/priam/src/main/java/com/netflix/priam/restore/AwsCrossAccountCryptographyRestoreStrategy.java +++ b/priam/src/main/java/com/netflix/priam/restore/AwsCrossAccountCryptographyRestoreStrategy.java @@ -1,16 +1,14 @@ /** * Copyright 2017 Netflix, Inc. - *
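Reviewer note: with the old `waitToComplete()` gone, the only contract a concrete strategy must satisfy is the new `downloadFile(path)` returning a `Future<Path>`. A hypothetical minimal implementation follows; the class name, method shape, and executor size are illustrative, not an actual Priam strategy.

```java
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

final class PlainDownloadSketch {
    private final ExecutorService executor = Executors.newFixedThreadPool(2);

    // Returns immediately; the base class collects the futures and joins later.
    Future<Path> downloadFile(Path remote, Path local) {
        return executor.submit(() -> {
            Files.createDirectories(local.getParent()); // ensure the target directory exists
            // ... stream the remote object into `local` here ...
            return remote;
        });
    }
}
```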

- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and + * + *

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + *

http://www.apache.org/licenses/LICENSE-2.0 + * + *

Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.priam.restore; @@ -19,14 +17,14 @@ import com.google.inject.Provider; import com.google.inject.Singleton; import com.google.inject.name.Named; -import com.netflix.priam.defaultimpl.ICassandraProcess; -import com.netflix.priam.config.IConfiguration; -import com.netflix.priam.cred.ICredentialGeneric; import com.netflix.priam.aws.S3CrossAccountFileSystem; import com.netflix.priam.backup.AbstractBackupPath; import com.netflix.priam.backup.MetaData; import com.netflix.priam.compress.ICompression; +import com.netflix.priam.config.IConfiguration; +import com.netflix.priam.cred.ICredentialGeneric; import com.netflix.priam.cryptography.IFileCryptography; +import com.netflix.priam.defaultimpl.ICassandraProcess; import com.netflix.priam.health.InstanceState; import com.netflix.priam.identity.InstanceIdentity; import com.netflix.priam.scheduler.SimpleTimer; @@ -42,26 +40,47 @@ @Singleton public class AwsCrossAccountCryptographyRestoreStrategy extends EncryptedRestoreBase { - private static final Logger logger = LoggerFactory.getLogger(AwsCrossAccountCryptographyRestoreStrategy.class); + private static final Logger logger = + LoggerFactory.getLogger(AwsCrossAccountCryptographyRestoreStrategy.class); public static final String JOBNAME = "AWS_CROSS_ACCT_CRYPTOGRAPHY_RESTORE_JOB"; - //Note: see javadoc for S3CrossAccountFileSystem for reason why we inject a concrete class (S3CrossAccountFileSystem) instead of the inteface IBackupFileSystem + // Note: see javadoc for S3CrossAccountFileSystem for reason why we inject a concrete class + // (S3CrossAccountFileSystem) instead of the inteface IBackupFileSystem @Inject - public AwsCrossAccountCryptographyRestoreStrategy(final IConfiguration config, ICassandraProcess cassProcess - , S3CrossAccountFileSystem crossAcctfs - , Sleeper sleeper - , @Named("filecryptoalgorithm") IFileCryptography fileCryptography - , @Named("pgpcredential") ICredentialGeneric credential - , ICompression compress, Provider pathProvider, - InstanceIdentity id, RestoreTokenSelector tokenSelector, MetaData metaData, InstanceState instanceState, IPostRestoreHook postRestoreHook) { + public AwsCrossAccountCryptographyRestoreStrategy( + final IConfiguration config, + ICassandraProcess cassProcess, + S3CrossAccountFileSystem crossAcctfs, + Sleeper sleeper, + @Named("filecryptoalgorithm") IFileCryptography fileCryptography, + @Named("pgpcredential") ICredentialGeneric credential, + ICompression compress, + Provider pathProvider, + InstanceIdentity id, + RestoreTokenSelector tokenSelector, + MetaData metaData, + InstanceState instanceState, + IPostRestoreHook postRestoreHook) { - super(config, crossAcctfs.getBackupFileSystem(), JOBNAME, sleeper, cassProcess, pathProvider, id, tokenSelector, credential, fileCryptography, compress, metaData, instanceState, postRestoreHook); + super( + config, + crossAcctfs.getBackupFileSystem(), + JOBNAME, + sleeper, + cassProcess, + pathProvider, + id, + tokenSelector, + credential, + fileCryptography, + compress, + metaData, + instanceState, + postRestoreHook); } - /** - * @return a timer used by the scheduler to determine when "this" should be run. 
- */ + /** @return a timer used by the scheduler to determine when "this" should be run. */ public static TaskTimer getTimer() { return new SimpleTimer(JOBNAME); } -} \ No newline at end of file +} diff --git a/priam/src/main/java/com/netflix/priam/restore/EncryptedRestoreBase.java b/priam/src/main/java/com/netflix/priam/restore/EncryptedRestoreBase.java index a9fd1fc88..2e69d5870 100755 --- a/priam/src/main/java/com/netflix/priam/restore/EncryptedRestoreBase.java +++ b/priam/src/main/java/com/netflix/priam/restore/EncryptedRestoreBase.java @@ -1,164 +1,208 @@ /** * Copyright 2017 Netflix, Inc. - *
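Reviewer note: the constructor above pulls its cryptography and credentials through the `@Named("filecryptoalgorithm")` and `@Named("pgpcredential")` injection points. A hedged sketch of the kind of Guice module that satisfies them; the binding names come from the annotations in this diff, while the two implementation classes are placeholders that do not exist in the patch.

```java
import com.google.inject.AbstractModule;
import com.google.inject.name.Names;
import com.netflix.priam.cred.ICredentialGeneric;
import com.netflix.priam.cryptography.IFileCryptography;

public class CryptoModuleSketch extends AbstractModule {
    @Override
    protected void configure() {
        bind(IFileCryptography.class)
                .annotatedWith(Names.named("filecryptoalgorithm"))
                .to(PgpCryptographyImpl.class); // placeholder implementation
        bind(ICredentialGeneric.class)
                .annotatedWith(Names.named("pgpcredential"))
                .to(PgpCredentialImpl.class); // placeholder implementation
    }
}
```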

- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and + * + *

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + *

http://www.apache.org/licenses/LICENSE-2.0 + * + *

Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.priam.restore; import com.google.inject.Provider; -import com.netflix.priam.defaultimpl.ICassandraProcess; +import com.netflix.priam.backup.AbstractBackupPath; +import com.netflix.priam.backup.IBackupFileSystem; +import com.netflix.priam.backup.MetaData; +import com.netflix.priam.compress.CompressionType; +import com.netflix.priam.compress.ICompression; import com.netflix.priam.config.IConfiguration; import com.netflix.priam.cred.ICredentialGeneric; -import com.netflix.priam.backup.*; -import com.netflix.priam.compress.ICompression; import com.netflix.priam.cryptography.IFileCryptography; +import com.netflix.priam.defaultimpl.ICassandraProcess; import com.netflix.priam.health.InstanceState; import com.netflix.priam.identity.InstanceIdentity; import com.netflix.priam.scheduler.NamedThreadPoolExecutor; import com.netflix.priam.utils.RetryableCallable; import com.netflix.priam.utils.Sleeper; +import java.io.*; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.concurrent.Future; +import java.util.concurrent.ThreadPoolExecutor; import org.bouncycastle.util.io.Streams; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.*; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.atomic.AtomicInteger; - -/** - * Provides common functionality applicable to all restore strategies - */ -public abstract class EncryptedRestoreBase extends AbstractRestore{ +/** Provides common functionality applicable to all restore strategies */ +public abstract class EncryptedRestoreBase extends AbstractRestore { private static final Logger logger = LoggerFactory.getLogger(EncryptedRestoreBase.class); + private static final String TMP_SUFFIX = ".tmp"; - private String jobName; - private ICredentialGeneric pgpCredential; - private IFileCryptography fileCryptography; - private ICompression compress; + private final String jobName; + private final ICredentialGeneric pgpCredential; + private final IFileCryptography fileCryptography; + private final ICompression compress; private final ThreadPoolExecutor executor; - private AtomicInteger count = new AtomicInteger(); - protected EncryptedRestoreBase(IConfiguration config, IBackupFileSystem fs, String jobName, Sleeper sleeper, - ICassandraProcess cassProcess, Provider pathProvider, - InstanceIdentity instanceIdentity, RestoreTokenSelector tokenSelector, ICredentialGeneric pgpCredential, - IFileCryptography fileCryptography, ICompression compress, MetaData metaData, InstanceState instanceState, IPostRestoreHook postRestoreHook) { - super(config, fs, jobName, sleeper, pathProvider, instanceIdentity, tokenSelector, cassProcess, metaData, instanceState, postRestoreHook); + protected EncryptedRestoreBase( + IConfiguration config, + IBackupFileSystem fs, + String jobName, + Sleeper sleeper, + ICassandraProcess cassProcess, + Provider pathProvider, + InstanceIdentity instanceIdentity, + RestoreTokenSelector tokenSelector, + ICredentialGeneric pgpCredential, + IFileCryptography fileCryptography, + ICompression compress, + MetaData metaData, + InstanceState instanceState, + IPostRestoreHook postRestoreHook) { + super( + config, + fs, + jobName, + sleeper, 
+ pathProvider, + instanceIdentity, + tokenSelector, + cassProcess, + metaData, + instanceState, + postRestoreHook); this.jobName = jobName; this.pgpCredential = pgpCredential; this.fileCryptography = fileCryptography; this.compress = compress; - executor = new NamedThreadPoolExecutor(config.getMaxBackupDownloadThreads(), jobName); + executor = new NamedThreadPoolExecutor(config.getRestoreThreads(), jobName); executor.allowCoreThreadTimeOut(true); - logger.info("Trying to restore cassandra cluster with filesystem: {}, RestoreStrategy: {}, Encryption: ON, Compression: {}", - fs.getClass(), jobName, compress.getClass()); + logger.info( + "Trying to restore cassandra cluster with filesystem: {}, RestoreStrategy: {}, Encryption: ON, Compression: {}", + fs.getClass(), + jobName, + compress.getClass()); } @Override - protected final void downloadFile(final AbstractBackupPath path, final File restoreLocation) throws Exception{ - final char[] passPhrase = new String(this.pgpCredential.getValue(ICredentialGeneric.KEY.PGP_PASSWORD)).toCharArray(); - File tempFile = new File(restoreLocation.getAbsolutePath() + ".tmp"); - count.incrementAndGet(); - - try { - executor.submit(new RetryableCallable() { - - @Override - public Integer retriableCall() throws Exception { - - //== download object from source bucket - try { - - logger.info("Downloading file from: {} to: {}", path.getRemotePath(), tempFile.getAbsolutePath()); - fs.download(path, new FileOutputStream(tempFile), tempFile.getAbsolutePath()); - tracker.adjustAndAdd(path); - logger.info("Completed downloading file from: {} to: {}", path.getRemotePath(), tempFile.getAbsolutePath()); - - - } catch (Exception ex) { - //This behavior is retryable; therefore, lets get to a clean state before each retry. - if (tempFile.exists()) { - tempFile.createNewFile(); + protected final Future downloadFile(final AbstractBackupPath path) throws Exception { + final char[] passPhrase = + new String(this.pgpCredential.getValue(ICredentialGeneric.KEY.PGP_PASSWORD)) + .toCharArray(); + File restoreLocation = path.newRestoreFile(); + File tempFile = new File(restoreLocation.getAbsolutePath() + TMP_SUFFIX); + + return executor.submit( + new RetryableCallable() { + + @Override + public Path retriableCall() throws Exception { + + // == download object from source bucket + try { + // Not retrying to download file here as it is already in RetryCallable. + fs.downloadFile(path, TMP_SUFFIX, 0 /* retries */); + } catch (Exception ex) { + // This behavior is retryable; therefore, lets get to a clean state + // before each retry. + if (tempFile.exists()) { + tempFile.createNewFile(); + } + + throw new Exception( + "Exception downloading file from: " + + path.getRemotePath() + + " to: " + + tempFile.getAbsolutePath(), + ex); } - throw new Exception("Exception downloading file from: " + path.getRemotePath() + " to: " + tempFile.getAbsolutePath(), ex); - } + // == object downloaded successfully from source, decrypt it. 
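Reviewer note: the body that follows runs each file through a fixed staging pipeline: download to `<final>.tmp` (the `TMP_SUFFIX` passed to `fs.downloadFile`), decrypt to `<final>.tmp.decrypted`, then decompress or move into the final location, deleting the intermediates only after every stage succeeds. The naming convention, condensed into a sketch:

```java
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

// Staging layout used by this method; see the full body below for the real stages.
static void runStages(Path finalFile) throws Exception {
    Path tmp = Paths.get(finalFile + ".tmp");       // raw download target
    Path decrypted = Paths.get(tmp + ".decrypted"); // post-decryption staging file
    // ... download into tmp, decrypt tmp into decrypted, decompress/move into finalFile ...
    Files.deleteIfExists(tmp);                      // intermediates removed only on success
    Files.deleteIfExists(decrypted);
}
```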
+ File decryptedFile = new File(tempFile.getAbsolutePath() + ".decrypted"); + try (OutputStream fOut = + new BufferedOutputStream( + new FileOutputStream( + decryptedFile)); // destination file after + // decryption) + InputStream in = + new BufferedInputStream( + new FileInputStream(tempFile.getAbsolutePath()))) { + InputStream encryptedDataInputStream = + fileCryptography.decryptStream( + in, passPhrase, tempFile.getAbsolutePath()); + Streams.pipeAll(encryptedDataInputStream, fOut); + logger.info( + "Completed decrypting file: {} to final file dest: {}", + tempFile.getAbsolutePath(), + decryptedFile.getAbsolutePath()); + + } catch (Exception ex) { + // This behavior is retryable; therefore, lets get to a clean state + // before each retry. + if (tempFile.exists()) { + tempFile.createNewFile(); + } + + if (decryptedFile.exists()) { + decryptedFile.createNewFile(); + } + + throw new Exception( + "Exception during decryption file: " + + decryptedFile.getAbsolutePath(), + ex); + } - //== object downloaded successfully from source, decrypt it. - File decryptedFile = new File(tempFile.getAbsolutePath() + ".decrypted"); - try(OutputStream fOut = new BufferedOutputStream(new FileOutputStream(decryptedFile)); //destination file after decryption) - InputStream in = new BufferedInputStream(new FileInputStream(tempFile.getAbsolutePath()))) { - InputStream encryptedDataInputStream = fileCryptography.decryptStream(in, passPhrase, tempFile.getAbsolutePath()); - Streams.pipeAll(encryptedDataInputStream, fOut); - logger.info("Completed decrypting file: {} to final file dest: {}", tempFile.getAbsolutePath(), decryptedFile.getAbsolutePath()); - - } catch (Exception ex) { - //This behavior is retryable; therefore, lets get to a clean state before each retry. - if (tempFile.exists()) { - tempFile.createNewFile(); + // == object is downloaded and decrypted, now uncompress it if necessary + if (path.getCompression() == CompressionType.NONE) { + Files.move(decryptedFile.toPath(), restoreLocation.toPath()); + } else { + logger.info( + "Start uncompressing file: {} to the FINAL destination stream", + decryptedFile.getAbsolutePath()); + + try (InputStream is = + new BufferedInputStream( + new FileInputStream(decryptedFile)); + BufferedOutputStream finalDestination = + new BufferedOutputStream( + new FileOutputStream(restoreLocation))) { + compress.decompressAndClose(is, finalDestination); + } catch (Exception ex) { + throw new Exception( + "Exception uncompressing file: " + + decryptedFile.getAbsolutePath() + + " to the FINAL destination stream", + ex); + } + + logger.info( + "Completed uncompressing file: {} to the FINAL destination stream " + + " current worker: {}", + decryptedFile.getAbsolutePath(), + Thread.currentThread().getName()); } + // if here, everything was successful for this object, lets remove unneeded + // file(s) + if (tempFile.exists()) tempFile.delete(); if (decryptedFile.exists()) { - decryptedFile.createNewFile(); + decryptedFile.delete(); } - throw new Exception("Exception during decryption file: " + decryptedFile.getAbsolutePath(), ex); - } - - //== object downloaded and decrypted successfully, now uncompress it - logger.info("Start uncompressing file: {} to the FINAL destination stream", decryptedFile.getAbsolutePath()); - - try(InputStream is = new BufferedInputStream(new FileInputStream(decryptedFile)); - BufferedOutputStream finalDestination = new BufferedOutputStream(new FileOutputStream(restoreLocation))) { - compress.decompressAndClose(is, finalDestination); - } catch (Exception ex) 
{ - throw new Exception("Exception uncompressing file: " + decryptedFile.getAbsolutePath() + " to the FINAL destination stream", ex); + return Paths.get(path.getRemotePath()); } - - logger.info("Completed uncompressing file: {} to the FINAL destination stream " - + " current worker: {}", decryptedFile.getAbsolutePath(), Thread.currentThread().getName()); - //if here, everything was successful for this object, lets remove unneeded file(s) - if (tempFile.exists()) - tempFile.delete(); - - if (decryptedFile.exists()) { - decryptedFile.delete(); - } - - return count.decrementAndGet(); - } - - }); - }catch (Exception e){ - throw new Exception("Exception in download of: " + path.getFileName() + ", msg: " + e.getLocalizedMessage(), e); - } - - } - - @Override - protected final void waitToComplete() { - while (count.get() != 0) { - try { - sleeper.sleep(1000); - } catch (InterruptedException e) { - logger.error("Interrupted: ", e); - Thread.currentThread().interrupt(); - } - } + }); } @Override public String getName() { return this.jobName; } -} \ No newline at end of file +} diff --git a/priam/src/main/java/com/netflix/priam/restore/EncryptedRestoreStrategy.java b/priam/src/main/java/com/netflix/priam/restore/EncryptedRestoreStrategy.java index 40391e4b8..fdef4f4ad 100755 --- a/priam/src/main/java/com/netflix/priam/restore/EncryptedRestoreStrategy.java +++ b/priam/src/main/java/com/netflix/priam/restore/EncryptedRestoreStrategy.java @@ -1,16 +1,14 @@ /** * Copyright 2017 Netflix, Inc. - *
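Reviewer note: each download above is wrapped in Priam's `RetryableCallable`, with the catch blocks resetting the staging files so a retry starts from a clean state. Stripped of the Priam specifics, the retry shape looks roughly like the sketch below; Priam's class adds retry limits and sleeps, and this only shows the loop.

```java
// Generic retry loop in the spirit of RetryableCallable (illustrative only).
// Assumes maxAttempts >= 1.
abstract class RetrySketch<T> {
    abstract T attempt() throws Exception;

    T call(int maxAttempts) throws Exception {
        Exception last = null;
        for (int i = 0; i < maxAttempts; i++) {
            try {
                return attempt();
            } catch (Exception e) {
                last = e; // attempt() is expected to restore a clean state before the next try
            }
        }
        throw last;
    }
}
```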

- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and + * + *

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + *

http://www.apache.org/licenses/LICENSE-2.0 + * + *

Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.priam.restore; @@ -19,14 +17,14 @@ import com.google.inject.Provider; import com.google.inject.Singleton; import com.google.inject.name.Named; -import com.netflix.priam.defaultimpl.ICassandraProcess; -import com.netflix.priam.config.IConfiguration; -import com.netflix.priam.cred.ICredentialGeneric; import com.netflix.priam.backup.AbstractBackupPath; import com.netflix.priam.backup.IBackupFileSystem; import com.netflix.priam.backup.MetaData; import com.netflix.priam.compress.ICompression; +import com.netflix.priam.config.IConfiguration; +import com.netflix.priam.cred.ICredentialGeneric; import com.netflix.priam.cryptography.IFileCryptography; +import com.netflix.priam.defaultimpl.ICassandraProcess; import com.netflix.priam.health.InstanceState; import com.netflix.priam.identity.InstanceIdentity; import com.netflix.priam.scheduler.SimpleTimer; @@ -44,15 +42,36 @@ public class EncryptedRestoreStrategy extends EncryptedRestoreBase { public static final String JOBNAME = "CRYPTOGRAPHY_RESTORE_JOB"; @Inject - public EncryptedRestoreStrategy(final IConfiguration config, ICassandraProcess cassProcess, - @Named("encryptedbackup") IBackupFileSystem fs, Sleeper sleeper - , @Named("filecryptoalgorithm") IFileCryptography fileCryptography - , @Named("pgpcredential") ICredentialGeneric credential - , ICompression compress, Provider pathProvider, - InstanceIdentity id, RestoreTokenSelector tokenSelector, MetaData metaData, InstanceState instanceState, IPostRestoreHook postRestoreHook - ) { + public EncryptedRestoreStrategy( + final IConfiguration config, + ICassandraProcess cassProcess, + @Named("encryptedbackup") IBackupFileSystem fs, + Sleeper sleeper, + @Named("filecryptoalgorithm") IFileCryptography fileCryptography, + @Named("pgpcredential") ICredentialGeneric credential, + ICompression compress, + Provider pathProvider, + InstanceIdentity id, + RestoreTokenSelector tokenSelector, + MetaData metaData, + InstanceState instanceState, + IPostRestoreHook postRestoreHook) { - super(config, fs, JOBNAME, sleeper, cassProcess, pathProvider, id, tokenSelector, credential, fileCryptography, compress, metaData, instanceState, postRestoreHook); + super( + config, + fs, + JOBNAME, + sleeper, + cassProcess, + pathProvider, + id, + tokenSelector, + credential, + fileCryptography, + compress, + metaData, + instanceState, + postRestoreHook); } /* @@ -61,5 +80,4 @@ public EncryptedRestoreStrategy(final IConfiguration config, ICassandraProcess c public static TaskTimer getTimer() { return new SimpleTimer(JOBNAME); } - -} \ No newline at end of file +} diff --git a/priam/src/main/java/com/netflix/priam/restore/GoogleCryptographyRestoreStrategy.java b/priam/src/main/java/com/netflix/priam/restore/GoogleCryptographyRestoreStrategy.java index 5dfdcfef7..1c0fc9d79 100755 --- a/priam/src/main/java/com/netflix/priam/restore/GoogleCryptographyRestoreStrategy.java +++ b/priam/src/main/java/com/netflix/priam/restore/GoogleCryptographyRestoreStrategy.java @@ -1,16 +1,14 @@ /** * Copyright 2017 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and + * + *

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + *

http://www.apache.org/licenses/LICENSE-2.0 + * + *

Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.priam.restore; @@ -19,14 +17,14 @@ import com.google.inject.Provider; import com.google.inject.Singleton; import com.google.inject.name.Named; -import com.netflix.priam.defaultimpl.ICassandraProcess; -import com.netflix.priam.config.IConfiguration; -import com.netflix.priam.cred.ICredentialGeneric; import com.netflix.priam.backup.AbstractBackupPath; import com.netflix.priam.backup.IBackupFileSystem; import com.netflix.priam.backup.MetaData; import com.netflix.priam.compress.ICompression; +import com.netflix.priam.config.IConfiguration; +import com.netflix.priam.cred.ICredentialGeneric; import com.netflix.priam.cryptography.IFileCryptography; +import com.netflix.priam.defaultimpl.ICassandraProcess; import com.netflix.priam.health.InstanceState; import com.netflix.priam.identity.InstanceIdentity; import com.netflix.priam.scheduler.SimpleTimer; @@ -37,28 +35,44 @@ @Singleton public class GoogleCryptographyRestoreStrategy extends EncryptedRestoreBase { - private static final Logger logger = LoggerFactory.getLogger(GoogleCryptographyRestoreStrategy.class); + private static final Logger logger = + LoggerFactory.getLogger(GoogleCryptographyRestoreStrategy.class); public static final String JOBNAME = "GOOGLECLOUDSTORAGE_RESTORE_JOB"; @Inject - public GoogleCryptographyRestoreStrategy(final IConfiguration config, ICassandraProcess cassProcess, @Named("gcsencryptedbackup") IBackupFileSystem fs, Sleeper sleeper - , @Named("filecryptoalgorithm") IFileCryptography fileCryptography - , @Named("pgpcredential") ICredentialGeneric credential - , ICompression compress, Provider pathProvider, - InstanceIdentity id, RestoreTokenSelector tokenSelector, MetaData metaData, InstanceState instanceState, IPostRestoreHook postRestoreHook - ) { - super(config, fs, JOBNAME, sleeper, cassProcess, pathProvider, id, tokenSelector, credential, fileCryptography, compress, metaData, instanceState, postRestoreHook); + public GoogleCryptographyRestoreStrategy( + final IConfiguration config, + ICassandraProcess cassProcess, + @Named("gcsencryptedbackup") IBackupFileSystem fs, + Sleeper sleeper, + @Named("filecryptoalgorithm") IFileCryptography fileCryptography, + @Named("pgpcredential") ICredentialGeneric credential, + ICompression compress, + Provider pathProvider, + InstanceIdentity id, + RestoreTokenSelector tokenSelector, + MetaData metaData, + InstanceState instanceState, + IPostRestoreHook postRestoreHook) { + super( + config, + fs, + JOBNAME, + sleeper, + cassProcess, + pathProvider, + id, + tokenSelector, + credential, + fileCryptography, + compress, + metaData, + instanceState, + postRestoreHook); } - - /** - * @return a timer used by the scheduler to determine when "this" should be run. - */ + /** @return a timer used by the scheduler to determine when "this" should be run. 
*/ public static TaskTimer getTimer() { return new SimpleTimer(JOBNAME); } - - - - -} \ No newline at end of file +} diff --git a/priam/src/main/java/com/netflix/priam/restore/IPostRestoreHook.java b/priam/src/main/java/com/netflix/priam/restore/IPostRestoreHook.java index f743fbabf..7f7e3335a 100644 --- a/priam/src/main/java/com/netflix/priam/restore/IPostRestoreHook.java +++ b/priam/src/main/java/com/netflix/priam/restore/IPostRestoreHook.java @@ -18,11 +18,10 @@ import com.google.inject.ImplementedBy; -/** - * Interface for post restore hook - */ +/** Interface for post restore hook */ @ImplementedBy(PostRestoreHook.class) public interface IPostRestoreHook { boolean hasValidParameters(); + void execute() throws Exception; } diff --git a/priam/src/main/java/com/netflix/priam/restore/IRestoreStrategy.java b/priam/src/main/java/com/netflix/priam/restore/IRestoreStrategy.java index 5869e397b..1da3a7677 100755 --- a/priam/src/main/java/com/netflix/priam/restore/IRestoreStrategy.java +++ b/priam/src/main/java/com/netflix/priam/restore/IRestoreStrategy.java @@ -1,16 +1,14 @@ /** * Copyright 2017 Netflix, Inc. - *
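Reviewer note: `IPostRestoreHook` now makes `execute()` part of the public contract, and `@ImplementedBy(PostRestoreHook.class)` only supplies the default; a deployment can override it with its own Guice binding. A hypothetical alternative implementation, to show how small the surface is:

```java
// Hypothetical custom hook; bind(IPostRestoreHook.class).to(LoggingPostRestoreHook.class)
// in a Guice module would override the @ImplementedBy default.
public class LoggingPostRestoreHook implements IPostRestoreHook {
    @Override
    public boolean hasValidParameters() {
        return true; // nothing to validate for this hook
    }

    @Override
    public void execute() throws Exception {
        System.out.println("Restore finished; run post-restore verification here.");
    }
}
```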

- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and + * + *

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + *

http://www.apache.org/licenses/LICENSE-2.0 + * + *

Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.priam.restore; @@ -19,5 +17,5 @@ * A means to restore C* files from various source types (e.g. Google, AWS bucket whose objects are not owned by the current IAM role), and encrypted / non-encrypted data. */ public interface IRestoreStrategy { - //public void restore(Date startTime, Date endTime) throws Exception; + // public void restore(Date startTime, Date endTime) throws Exception; } diff --git a/priam/src/main/java/com/netflix/priam/restore/PostRestoreHook.java b/priam/src/main/java/com/netflix/priam/restore/PostRestoreHook.java index 0330c5cc2..1e1c2009c 100644 --- a/priam/src/main/java/com/netflix/priam/restore/PostRestoreHook.java +++ b/priam/src/main/java/com/netflix/priam/restore/PostRestoreHook.java @@ -20,11 +20,6 @@ import com.netflix.priam.scheduler.NamedThreadPoolExecutor; import com.netflix.priam.utils.RetryableCallable; import com.netflix.priam.utils.Sleeper; -import org.apache.commons.lang3.StringUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.inject.Inject; import java.io.File; import java.io.IOException; import java.io.RandomAccessFile; @@ -32,19 +27,24 @@ import java.nio.channels.FileLock; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; +import javax.inject.Inject; +import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * An implementation of IPostRestoreHook. Kicks off a child process for post restore hook using ProcessBuilder; uses heart beat monitor to monitor progress of the sub process - * and uses a file lock to pass the active state to the sub process + * An implementation of IPostRestoreHook. 
Kicks off a child process for post restore hook using + * ProcessBuilder; uses heart beat monitor to monitor progress of the sub process and uses a file + * lock to pass the active state to the sub process */ public class PostRestoreHook implements IPostRestoreHook { private static final Logger logger = LoggerFactory.getLogger(PostRestoreHook.class); private final IConfiguration config; private final Sleeper sleeper; - private static String PostRestoreHookCommandDelimiter = " "; - private static String PriamPostRestoreHookFilePrefix = "PriamFileForPostRestoreHook"; - private static String PriamPostRestoreHookFileSuffix = ".tmp"; - private static String PriamPostRestoreHookFileOptionName = "--parentHookFilePath="; + private static final String PostRestoreHookCommandDelimiter = " "; + private static final String PriamPostRestoreHookFilePrefix = "PriamFileForPostRestoreHook"; + private static final String PriamPostRestoreHookFileSuffix = ".tmp"; + private static final String PriamPostRestoreHookFileOptionName = "--parentHookFilePath="; @Inject public PostRestoreHook(IConfiguration config, Sleeper sleeper) { @@ -54,30 +54,34 @@ public PostRestoreHook(IConfiguration config, Sleeper sleeper) { /** * Checks parameters to make sure none are blank + * * @return if all parameters are valid */ public boolean hasValidParameters() { - if(config.isPostRestoreHookEnabled()) { - if(StringUtils.isBlank(config.getPostRestoreHook()) - || StringUtils.isBlank(config.getPostRestoreHookHeartbeatFileName()) - || StringUtils.isBlank(config.getPostRestoreHookDoneFileName())) { - return false; - } + if (config.isPostRestoreHookEnabled()) { + return !StringUtils.isBlank(config.getPostRestoreHook()) + && !StringUtils.isBlank(config.getPostRestoreHookHeartbeatFileName()) + && !StringUtils.isBlank(config.getPostRestoreHookDoneFileName()); } return true; } /** - * Executes a sub process as part of post restore hook, and waits for the completion of the process. In case of lack of heart beat from the sub process, existing sub process is terminated - * and new sub process is kicked off + * Executes a sub process as part of post restore hook, and waits for the completion of the + * process. 
In case of lack of heart beat from the sub process, existing sub process is + * terminated and new sub process is kicked off + * * @throws Exception */ public void execute() throws Exception { - if(config.isPostRestoreHookEnabled()) { + if (config.isPostRestoreHookEnabled()) { logger.debug("Started PostRestoreHook execution"); - //create a temp file to be used to indicate state of the current process, to the sub-process - File tempLockFile = File.createTempFile(PriamPostRestoreHookFilePrefix, PriamPostRestoreHookFileSuffix); + // create a temp file to be used to indicate state of the current process, to the + // sub-process + File tempLockFile = + File.createTempFile( + PriamPostRestoreHookFilePrefix, PriamPostRestoreHookFileSuffix); RandomAccessFile raf = new RandomAccessFile(tempLockFile.getPath(), "rw"); FileChannel fileChannel = raf.getChannel(); FileLock lock = fileChannel.lock(); @@ -88,26 +92,38 @@ public void execute() throws Exception { int countOfProcessStarts = 0; while (true) { if (doneFileExists()) { - logger.info("Not starting PostRestoreHook since DONE file already exists."); + logger.info( + "Not starting PostRestoreHook since DONE file already exists."); break; } String postRestoreHook = config.getPostRestoreHook(); - //add temp file path as parameter to the jar file - postRestoreHook = postRestoreHook + PostRestoreHookCommandDelimiter + PriamPostRestoreHookFileOptionName + tempLockFile.getAbsolutePath(); - String[] processCommandArguments = postRestoreHook.split(PostRestoreHookCommandDelimiter); + // add temp file path as parameter to the jar file + postRestoreHook = + postRestoreHook + + PostRestoreHookCommandDelimiter + + PriamPostRestoreHookFileOptionName + + tempLockFile.getAbsolutePath(); + String[] processCommandArguments = + postRestoreHook.split(PostRestoreHookCommandDelimiter); ProcessBuilder processBuilder = new ProcessBuilder(processCommandArguments); - //start sub-process + // start sub-process Process process = processBuilder.inheritIO().start(); - logger.info("Started PostRestoreHook: {} - Attempt#{}", postRestoreHook, ++countOfProcessStarts); + logger.info( + "Started PostRestoreHook: {} - Attempt#{}", + postRestoreHook, + ++countOfProcessStarts); - //monitor progress of sub-process + // monitor progress of sub-process monitorPostRestoreHookHeartBeat(process); - //block until sub-process completes or until the timeout - if (!process.waitFor(config.getPostRestoreHookTimeOutInDays(), TimeUnit.DAYS)) { - logger.info("PostRestoreHook process did not complete within {} days. Forcefully terminating the process.", config.getPostRestoreHookTimeOutInDays()); + // block until sub-process completes or until the timeout + if (!process.waitFor( + config.getPostRestoreHookTimeOutInDays(), TimeUnit.DAYS)) { + logger.info( + "PostRestoreHook process did not complete within {} days. Forcefully terminating the process.", + config.getPostRestoreHookTimeOutInDays()); process.destroyForcibly(); } @@ -119,10 +135,13 @@ public void execute() throws Exception { } logger.debug("Completed PostRestoreHook execution"); } else { - throw new PostRestoreHookException(String.format("Could not acquire lock on a temp file necessary for PostRestoreHook to execute. Path to temp file: %s", tempLockFile.getAbsolutePath())); + throw new PostRestoreHookException( + String.format( + "Could not acquire lock on a temp file necessary for PostRestoreHook to execute. 
Path to temp file: %s", + tempLockFile.getAbsolutePath())); } } finally { - //close and delete temp file + // close and delete temp file lock.release(); fileChannel.close(); raf.close(); @@ -131,37 +150,44 @@ public void execute() throws Exception { } } - /** * Monitors heart beat of the process + * * @param process Process to be monitored * @throws InterruptedException * @throws IOException */ - private void monitorPostRestoreHookHeartBeat(Process process) throws InterruptedException, IOException { + private void monitorPostRestoreHookHeartBeat(Process process) + throws InterruptedException, IOException { File heartBeatFile = new File(config.getPostRestoreHookHeartbeatFileName()); - ThreadPoolExecutor heartBeatPoolExecutor = new NamedThreadPoolExecutor(1, "PostRestoreHook_HeartBeatThreadPool"); + ThreadPoolExecutor heartBeatPoolExecutor = + new NamedThreadPoolExecutor(1, "PostRestoreHook_HeartBeatThreadPool"); heartBeatPoolExecutor.allowCoreThreadTimeOut(true); - heartBeatPoolExecutor.submit(new RetryableCallable() { - @Override - public Integer retriableCall() throws Exception { - while (true) { - sleeper.sleep(config.getPostRestoreHookHeartbeatCheckFrequencyInMs()); - if(System.currentTimeMillis() - heartBeatFile.lastModified() > config.getPostRestoreHookHeartBeatTimeoutInMs()) { - //kick off post restore hook process, since there is no heartbeat - logger.info("No heartbeat for the last {} ms, killing the existing process.", config.getPostRestoreHookHeartBeatTimeoutInMs()); - if(process.isAlive()) { - process.destroyForcibly(); + heartBeatPoolExecutor.submit( + new RetryableCallable() { + @Override + public Integer retriableCall() throws Exception { + while (true) { + sleeper.sleep(config.getPostRestoreHookHeartbeatCheckFrequencyInMs()); + if (System.currentTimeMillis() - heartBeatFile.lastModified() + > config.getPostRestoreHookHeartBeatTimeoutInMs()) { + // kick off post restore hook process, since there is no heartbeat + logger.info( + "No heartbeat for the last {} ms, killing the existing process.", + config.getPostRestoreHookHeartBeatTimeoutInMs()); + if (process.isAlive()) { + process.destroyForcibly(); + } + return 0; + } } - return 0; } - } - } - }); + }); } /** * Checks for presence of DONE file + * * @return if done file exists */ private boolean doneFileExists() { diff --git a/priam/src/main/java/com/netflix/priam/restore/PostRestoreHookException.java b/priam/src/main/java/com/netflix/priam/restore/PostRestoreHookException.java index e6daf7d78..8b09f6441 100644 --- a/priam/src/main/java/com/netflix/priam/restore/PostRestoreHookException.java +++ b/priam/src/main/java/com/netflix/priam/restore/PostRestoreHookException.java @@ -16,9 +16,7 @@ package com.netflix.priam.restore; -/** - * Exception raised by PostRestoreHook - */ +/** Exception raised by PostRestoreHook */ public class PostRestoreHookException extends Exception { public PostRestoreHookException(String message) { @@ -28,5 +26,4 @@ public PostRestoreHookException(String message) { public PostRestoreHookException(String message, Exception e) { super(message, e); } - -} \ No newline at end of file +} diff --git a/priam/src/main/java/com/netflix/priam/restore/Restore.java b/priam/src/main/java/com/netflix/priam/restore/Restore.java index 7ed6d2aed..c5cd04388 100644 --- a/priam/src/main/java/com/netflix/priam/restore/Restore.java +++ b/priam/src/main/java/com/netflix/priam/restore/Restore.java @@ -20,71 +20,56 @@ import com.google.inject.Provider; import com.google.inject.Singleton; import com.google.inject.name.Named; 
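The --parentHookFilePath= option above hands the child the path of the temp file on which the parent holds an exclusive FileLock for the whole run. A child can therefore infer that the parent has died when tryLock() starts succeeding. A hypothetical sketch of that probe, assuming only the locking behavior visible in this patch; the helper class and method names are invented:

import java.io.RandomAccessFile;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;

/** Hypothetical child-side probe: the parent holds an exclusive lock on this file while alive. */
public final class ParentLivenessProbe {
    private ParentLivenessProbe() {}

    public static boolean isParentAlive(String parentHookFilePath) {
        try (RandomAccessFile raf = new RandomAccessFile(parentHookFilePath, "rw");
                FileChannel channel = raf.getChannel()) {
            FileLock lock = channel.tryLock();
            if (lock == null) {
                return true; // another process (the parent) still holds the lock
            }
            lock.release(); // we acquired it, so the parent must have released it by exiting
            return false;
        } catch (Exception e) {
            // I/O or overlapping-lock errors; conservatively report the parent as alive
            return true;
        }
    }
}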
-import com.netflix.priam.defaultimpl.ICassandraProcess; -import com.netflix.priam.config.IConfiguration; import com.netflix.priam.backup.AbstractBackupPath; import com.netflix.priam.backup.IBackupFileSystem; import com.netflix.priam.backup.MetaData; +import com.netflix.priam.config.IConfiguration; +import com.netflix.priam.defaultimpl.ICassandraProcess; import com.netflix.priam.health.InstanceState; import com.netflix.priam.identity.InstanceIdentity; -import com.netflix.priam.scheduler.NamedThreadPoolExecutor; import com.netflix.priam.scheduler.SimpleTimer; import com.netflix.priam.scheduler.TaskTimer; -import com.netflix.priam.utils.RetryableCallable; import com.netflix.priam.utils.Sleeper; +import java.nio.file.Path; +import java.util.concurrent.Future; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.File; -import java.io.FileOutputStream; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.atomic.AtomicInteger; - -/** - * Main class for restoring data from backup. Backup restored using this way are not encrypted. - */ +/** Main class for restoring data from backup. Backup restored using this way are not encrypted. */ @Singleton public class Restore extends AbstractRestore { public static final String JOBNAME = "AUTO_RESTORE_JOB"; private static final Logger logger = LoggerFactory.getLogger(Restore.class); - private final ThreadPoolExecutor executor; - private AtomicInteger count = new AtomicInteger(); @Inject - public Restore(IConfiguration config, @Named("backup") IBackupFileSystem fs, Sleeper sleeper, ICassandraProcess cassProcess, - Provider pathProvider, - InstanceIdentity instanceIdentity, RestoreTokenSelector tokenSelector, MetaData metaData, InstanceState instanceState, IPostRestoreHook postRestoreHook) { - super(config, fs, JOBNAME, sleeper, pathProvider, instanceIdentity, tokenSelector, cassProcess, metaData, instanceState, postRestoreHook); - executor = new NamedThreadPoolExecutor(config.getMaxBackupDownloadThreads(), JOBNAME); - executor.allowCoreThreadTimeOut(true); + public Restore( + IConfiguration config, + @Named("backup") IBackupFileSystem fs, + Sleeper sleeper, + ICassandraProcess cassProcess, + Provider pathProvider, + InstanceIdentity instanceIdentity, + RestoreTokenSelector tokenSelector, + MetaData metaData, + InstanceState instanceState, + IPostRestoreHook postRestoreHook) { + super( + config, + fs, + JOBNAME, + sleeper, + pathProvider, + instanceIdentity, + tokenSelector, + cassProcess, + metaData, + instanceState, + postRestoreHook); } @Override - protected final void downloadFile(final AbstractBackupPath path, final File restoreLocation) throws Exception { - count.incrementAndGet(); - executor.submit(new RetryableCallable() { - @Override - public Integer retriableCall() throws Exception { - logger.info("Downloading file: {} to: {}", path.getRemotePath(), restoreLocation.getAbsolutePath()); - fs.download(path, new FileOutputStream(restoreLocation), restoreLocation.getAbsolutePath()); - tracker.adjustAndAdd(path); - // TODO: fix me -> if there is exception the why hang? 
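The executor, AtomicInteger counter, and sleep-polling waitToComplete() removed in this hunk give way, just below, to a downloadFile that simply returns the Future from fs.asyncDownloadFile(path, 5 /* retries */), so completion tracking moves to the caller. A sketch of how a caller might drive such a Future-returning API; the AsyncDownloader interface here is invented for illustration and is not Priam's IBackupFileSystem:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

/** Hypothetical caller that waits on Future-returning downloads instead of polling a counter. */
class DownloadDriver {
    interface AsyncDownloader {
        Future<Void> asyncDownloadFile(String remotePath, int retries);
    }

    static void downloadAll(AsyncDownloader fs, List<String> remotePaths) throws Exception {
        List<Future<Void>> pending = new ArrayList<>();
        for (String path : remotePaths) {
            pending.add(fs.asyncDownloadFile(path, 5 /* retries */));
        }
        for (Future<Void> f : pending) {
            try {
                f.get(); // blocks until this download finishes; replaces the sleep-and-count loop
            } catch (ExecutionException e) {
                throw new Exception("Download failed", e.getCause());
            }
        }
    }
}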
- logger.info("Completed download of file: {} to: {}", path.getRemotePath(), restoreLocation.getAbsolutePath()); - return count.decrementAndGet(); - } - }); - } - - @Override - protected final void waitToComplete() { - while (count.get() != 0) { - try { - sleeper.sleep(1000); - } catch (InterruptedException e) { - logger.error("Interrupted: ", e); - Thread.currentThread().interrupt(); - } - } + protected final Future downloadFile(final AbstractBackupPath path) throws Exception { + return fs.asyncDownloadFile(path, 5 /* retries */); } public static TaskTimer getTimer() { @@ -95,8 +80,4 @@ public static TaskTimer getTimer() { public String getName() { return JOBNAME; } - - public int getActiveCount() { - return (executor == null) ? 0 : executor.getActiveCount(); - } -} \ No newline at end of file +} diff --git a/priam/src/main/java/com/netflix/priam/restore/RestoreContext.java b/priam/src/main/java/com/netflix/priam/restore/RestoreContext.java index 2b839d76c..ba54a819d 100755 --- a/priam/src/main/java/com/netflix/priam/restore/RestoreContext.java +++ b/priam/src/main/java/com/netflix/priam/restore/RestoreContext.java @@ -1,16 +1,14 @@ /** * Copyright 2017 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and + * + *

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + *

http://www.apache.org/licenses/LICENSE-2.0 + * + *

Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.priam.restore; @@ -37,37 +35,48 @@ public RestoreContext(IConfiguration config, PriamScheduler scheduler) { this.scheduler = scheduler; } - public boolean isRestoreEnabled(){ + public boolean isRestoreEnabled() { return !StringUtils.isEmpty(config.getRestoreSnapshot()); } public void restore() throws Exception { - if (!isRestoreEnabled()) - return; + if (!isRestoreEnabled()) return; - //Restore is required. + // Restore is required. if (StringUtils.isEmpty(config.getRestoreSourceType()) && !config.isRestoreEncrypted()) { - //Restore is needed and it will be done from the primary AWS account - scheduler.addTask(Restore.JOBNAME, Restore.class, Restore.getTimer());//restore from the AWS primary acct + // Restore is needed and it will be done from the primary AWS account + scheduler.addTask( + Restore.JOBNAME, + Restore.class, + Restore.getTimer()); // restore from the AWS primary acct logger.info("Scheduled task " + Restore.JOBNAME); } else if (config.isRestoreEncrypted()) { SourceType sourceType = SourceType.lookup(config.getRestoreSourceType(), true, false); - if (sourceType == null) - { - scheduler.addTask(EncryptedRestoreStrategy.JOBNAME, EncryptedRestoreStrategy.class, EncryptedRestoreStrategy.getTimer()); + if (sourceType == null) { + scheduler.addTask( + EncryptedRestoreStrategy.JOBNAME, + EncryptedRestoreStrategy.class, + EncryptedRestoreStrategy.getTimer()); logger.info("Scheduled task " + Restore.JOBNAME); return; } switch (sourceType) { case AWSCROSSACCT: - scheduler.addTask(AwsCrossAccountCryptographyRestoreStrategy.JOBNAME, AwsCrossAccountCryptographyRestoreStrategy.class, AwsCrossAccountCryptographyRestoreStrategy.getTimer()); - logger.info("Scheduled task " + AwsCrossAccountCryptographyRestoreStrategy.JOBNAME); + scheduler.addTask( + AwsCrossAccountCryptographyRestoreStrategy.JOBNAME, + AwsCrossAccountCryptographyRestoreStrategy.class, + AwsCrossAccountCryptographyRestoreStrategy.getTimer()); + logger.info( + "Scheduled task " + AwsCrossAccountCryptographyRestoreStrategy.JOBNAME); break; case GOOGLE: - scheduler.addTask(GoogleCryptographyRestoreStrategy.JOBNAME, GoogleCryptographyRestoreStrategy.class, GoogleCryptographyRestoreStrategy.getTimer()); + scheduler.addTask( + GoogleCryptographyRestoreStrategy.JOBNAME, + GoogleCryptographyRestoreStrategy.class, + GoogleCryptographyRestoreStrategy.getTimer()); logger.info("Scheduled task " + GoogleCryptographyRestoreStrategy.JOBNAME); break; } @@ -75,7 +84,8 @@ public void restore() throws Exception { } enum SourceType { - AWSCROSSACCT("AWSCROSSACCT"), GOOGLE("GOOGLE"); + AWSCROSSACCT("AWSCROSSACCT"), + GOOGLE("GOOGLE"); private static final Logger logger = LoggerFactory.getLogger(SourceType.class); @@ -85,12 +95,16 @@ enum SourceType { this.sourceType = sourceType.toUpperCase(); } - public static SourceType lookup(String sourceType, boolean acceptNullOrEmpty, boolean acceptIllegalValue) throws UnsupportedTypeException { + public static SourceType lookup( + String sourceType, boolean acceptNullOrEmpty, boolean acceptIllegalValue) + throws UnsupportedTypeException { if (StringUtils.isEmpty(sourceType)) - if (acceptNullOrEmpty) - return null; + if (acceptNullOrEmpty) return null; else { - String message 
= String.format("%s is not a supported SourceType. Supported values are %s", sourceType, getSupportedValues()); + String message = + String.format( + "%s is not a supported SourceType. Supported values are %s", + sourceType, getSupportedValues()); logger.error(message); throw new UnsupportedTypeException(message); } @@ -98,10 +112,15 @@ public static SourceType lookup(String sourceType, boolean acceptNullOrEmpty, bo try { return SourceType.valueOf(sourceType.toUpperCase()); } catch (IllegalArgumentException ex) { - String message = String.format("%s is not a supported SourceType. Supported values are %s", sourceType, getSupportedValues()); + String message = + String.format( + "%s is not a supported SourceType. Supported values are %s", + sourceType, getSupportedValues()); if (acceptIllegalValue) { - message = message + ". Since acceptIllegalValue is set to True, returning NULL instead."; + message = + message + + ". Since acceptIllegalValue is set to True, returning NULL instead."; logger.error(message); return null; } @@ -111,13 +130,11 @@ public static SourceType lookup(String sourceType, boolean acceptNullOrEmpty, bo } } - private static String getSupportedValues() { - StringBuffer supportedValues = new StringBuffer(); + StringBuilder supportedValues = new StringBuilder(); boolean first = true; for (SourceType type : SourceType.values()) { - if (!first) - supportedValues.append(","); + if (!first) supportedValues.append(","); supportedValues.append(type); first = false; } @@ -133,4 +150,4 @@ public String getSourceType() { return sourceType; } } -} \ No newline at end of file +} diff --git a/priam/src/main/java/com/netflix/priam/restore/RestoreTokenSelector.java b/priam/src/main/java/com/netflix/priam/restore/RestoreTokenSelector.java index 6787fbe5b..7f0563cf8 100644 --- a/priam/src/main/java/com/netflix/priam/restore/RestoreTokenSelector.java +++ b/priam/src/main/java/com/netflix/priam/restore/RestoreTokenSelector.java @@ -21,44 +21,35 @@ import com.netflix.priam.backup.AbstractBackupPath; import com.netflix.priam.backup.IBackupFileSystem; import com.netflix.priam.utils.ITokenManager; - import java.math.BigInteger; import java.util.ArrayList; import java.util.Date; import java.util.Iterator; import java.util.List; -/** - * Runs algorithms as finding closest token from a list of token (in a backup) - */ +/** Runs algorithms as finding closest token from a list of token (in a backup) */ public class RestoreTokenSelector { private final ITokenManager tokenManager; private final IBackupFileSystem fs; @Inject + public RestoreTokenSelector(ITokenManager tokenManager, @Named("backup") IBackupFileSystem fs) { - public RestoreTokenSelector(ITokenManager tokenManager, @Named("backup") IBackupFileSystem fs) - - { this.tokenManager = tokenManager; this.fs = fs; } /** - * Get the closest token to current token from the list of tokens available - * in the backup + * Get the closest token to current token from the list of tokens available in the backup * - * @param tokenToSearch - * Token to search for - * @param startDate - * Date for which the backups are available + * @param tokenToSearch Token to search for + * @param startDate Date for which the backups are available * @return Token as BigInteger */ public BigInteger getClosestToken(BigInteger tokenToSearch, Date startDate) { - List tokenList = new ArrayList(); + List tokenList = new ArrayList<>(); Iterator iter = fs.listPrefixes(startDate); - while (iter.hasNext()) - tokenList.add(new BigInteger(iter.next().getToken())); + while 
(iter.hasNext()) tokenList.add(new BigInteger(iter.next().getToken())); return tokenManager.findClosestToken(tokenToSearch, tokenList); } } diff --git a/priam/src/main/java/com/netflix/priam/scheduler/BlockingSubmitThreadPoolExecutor.java b/priam/src/main/java/com/netflix/priam/scheduler/BlockingSubmitThreadPoolExecutor.java index 5fa23c770..3a047a7a4 100644 --- a/priam/src/main/java/com/netflix/priam/scheduler/BlockingSubmitThreadPoolExecutor.java +++ b/priam/src/main/java/com/netflix/priam/scheduler/BlockingSubmitThreadPoolExecutor.java @@ -16,25 +16,26 @@ */ package com.netflix.priam.scheduler; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.util.concurrent.*; import java.util.concurrent.atomic.AtomicInteger; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * {@link ThreadPoolExecutor} that will block in the {@code submit()} method - * until the task can be successfully added to the queue. + * {@link ThreadPoolExecutor} that will block in the {@code submit()} method until the task can be + * successfully added to the queue. */ public class BlockingSubmitThreadPoolExecutor extends ThreadPoolExecutor { private static final long DEFAULT_SLEEP = 100; private static final long DEFAULT_KEEP_ALIVE = 100; - private static final Logger logger = LoggerFactory.getLogger(BlockingSubmitThreadPoolExecutor.class); - private BlockingQueue queue; - private long giveupTime; - private AtomicInteger active; + private static final Logger logger = + LoggerFactory.getLogger(BlockingSubmitThreadPoolExecutor.class); + private final BlockingQueue queue; + private final long giveupTime; + private final AtomicInteger active; - public BlockingSubmitThreadPoolExecutor(int maximumPoolSize, BlockingQueue workQueue, long timeoutAdding) { + public BlockingSubmitThreadPoolExecutor( + int maximumPoolSize, BlockingQueue workQueue, long timeoutAdding) { super(maximumPoolSize, maximumPoolSize, DEFAULT_KEEP_ALIVE, TimeUnit.SECONDS, workQueue); this.queue = workQueue; this.giveupTime = timeoutAdding; @@ -42,9 +43,8 @@ public BlockingSubmitThreadPoolExecutor(int maximumPoolSize, BlockingQueue Future submit(Callable task) { @@ -73,9 +73,7 @@ protected void afterExecute(Runnable r, Throwable t) { active.decrementAndGet(); } - /** - * blocking call to test if the threads are done or not. - */ + /** blocking call to test if the threads are done or not. 
*/ public void sleepTillEmpty() { long timeout = 0; @@ -84,7 +82,8 @@ public void sleepTillEmpty() { if (timeout <= giveupTime) { Thread.sleep(DEFAULT_SLEEP); timeout += DEFAULT_SLEEP; - logger.debug("After Sleeping for empty: {}, Count: {}", +queue.size(), active.get()); + logger.debug( + "After Sleeping for empty: {}, Count: {}", +queue.size(), active.get()); } else { throw new RuntimeException("Timed out because TPE is too busy..."); } @@ -92,6 +91,5 @@ public void sleepTillEmpty() { throw new RuntimeException(e); } } - } } diff --git a/priam/src/main/java/com/netflix/priam/scheduler/CronTimer.java b/priam/src/main/java/com/netflix/priam/scheduler/CronTimer.java index 4872015a0..b190b6198 100644 --- a/priam/src/main/java/com/netflix/priam/scheduler/CronTimer.java +++ b/priam/src/main/java/com/netflix/priam/scheduler/CronTimer.java @@ -16,23 +16,26 @@ */ package com.netflix.priam.scheduler; +import java.text.ParseException; import org.apache.commons.lang3.StringUtils; import org.quartz.*; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.text.ParseException; - -/** - * Runs jobs at the specified absolute time and frequency - */ +/** Runs jobs at the specified absolute time and frequency */ public class CronTimer implements TaskTimer { private static final Logger logger = LoggerFactory.getLogger(CronTimer.class); - private String cronExpression; + private final String cronExpression; private String name; public enum DayOfWeek { - SUN, MON, TUE, WED, THU, FRI, SAT + SUN, + MON, + TUE, + WED, + THU, + FRI, + SAT } /* @@ -43,33 +46,25 @@ public CronTimer(String name, int min) { cronExpression = "*" + " " + "0/" + min + " " + "* * * ?"; } - /** - * Hourly cron. - */ + /** Hourly cron. */ public CronTimer(String name, int minute, int sec) { this.name = name; cronExpression = sec + " " + minute + " 0/1 * * ?"; } - /** - * Daily Cron - */ + /** Daily Cron */ public CronTimer(String name, int hour, int minute, int sec) { this.name = name; cronExpression = sec + " " + minute + " " + hour + " * * ?"; } - /** - * Weekly cron jobs - */ + /** Weekly cron jobs */ public CronTimer(String name, DayOfWeek dayofweek, int hour, int minute, int sec) { this.name = name; cronExpression = sec + " " + minute + " " + hour + " * * " + dayofweek; } - /** - * Cron Expression. - */ + /** Cron Expression. 
*/ public CronTimer(String expression) { this.cronExpression = expression; } @@ -80,7 +75,10 @@ public CronTimer(String name, String expression) { } public Trigger getTrigger() throws ParseException { - return TriggerBuilder.newTrigger().withIdentity(name, Scheduler.DEFAULT_GROUP).withSchedule(CronScheduleBuilder.cronSchedule(cronExpression)).build(); + return TriggerBuilder.newTrigger() + .withIdentity(name, Scheduler.DEFAULT_GROUP) + .withSchedule(CronScheduleBuilder.cronSchedule(cronExpression)) + .build(); } @Override @@ -88,18 +86,26 @@ public String getCronExpression() { return this.cronExpression; } - public static CronTimer getCronTimer(final String jobName, final String cronExpression) throws IllegalArgumentException { + public static CronTimer getCronTimer(final String jobName, final String cronExpression) + throws IllegalArgumentException { CronTimer cronTimer = null; if (!StringUtils.isEmpty(cronExpression) && cronExpression.equalsIgnoreCase("-1")) { - logger.info("Skipping {} as it is disabled via setting {} cron to -1.", jobName, jobName); + logger.info( + "Skipping {} as it is disabled via setting {} cron to -1.", jobName, jobName); } else { - if (StringUtils.isEmpty(cronExpression) || !CronExpression.isValidExpression(cronExpression)) - throw new IllegalArgumentException("Invalid CRON expression: " + cronExpression + - ". Please use -1, if you wish to disable " + jobName + " else fix the CRON expression and try again!"); + if (StringUtils.isEmpty(cronExpression) + || !CronExpression.isValidExpression(cronExpression)) + throw new IllegalArgumentException( + "Invalid CRON expression: " + + cronExpression + + ". Please use -1, if you wish to disable " + + jobName + + " else fix the CRON expression and try again!"); cronTimer = new CronTimer(jobName, cronExpression); - logger.info("Starting {} with CRON expression {}", jobName, cronTimer.getCronExpression()); + logger.info( + "Starting {} with CRON expression {}", jobName, cronTimer.getCronExpression()); } return cronTimer; } diff --git a/priam/src/main/java/com/netflix/priam/scheduler/NamedThreadPoolExecutor.java b/priam/src/main/java/com/netflix/priam/scheduler/NamedThreadPoolExecutor.java index d078b5dae..8649f1ef8 100644 --- a/priam/src/main/java/com/netflix/priam/scheduler/NamedThreadPoolExecutor.java +++ b/priam/src/main/java/com/netflix/priam/scheduler/NamedThreadPoolExecutor.java @@ -17,16 +17,20 @@ package com.netflix.priam.scheduler; import com.google.common.util.concurrent.ThreadFactoryBuilder; - import java.util.concurrent.*; public class NamedThreadPoolExecutor extends ThreadPoolExecutor { public NamedThreadPoolExecutor(int poolSize, String poolName) { - this(poolSize, poolName, new LinkedBlockingQueue()); + this(poolSize, poolName, new LinkedBlockingQueue<>()); } public NamedThreadPoolExecutor(int poolSize, String poolName, BlockingQueue queue) { - super(poolSize, poolSize, 1000, TimeUnit.MILLISECONDS, queue, + super( + poolSize, + poolSize, + 1000, + TimeUnit.MILLISECONDS, + queue, new ThreadFactoryBuilder().setDaemon(true).setNameFormat(poolName + "-%d").build(), new LocalRejectedExecutionHandler(queue)); } @@ -44,10 +48,9 @@ public void rejectedExecution(Runnable task, ThreadPoolExecutor executor) { throw new RejectedExecutionException("ThreadPoolExecutor has shut down"); try { - if (queue.offer(task, 1000, TimeUnit.MILLISECONDS)) - break; + if (queue.offer(task, 1000, TimeUnit.MILLISECONDS)) break; } catch (InterruptedException e) { - //NOP + // NOP } } } diff --git 
a/priam/src/main/java/com/netflix/priam/scheduler/PriamScheduler.java b/priam/src/main/java/com/netflix/priam/scheduler/PriamScheduler.java index f6bfce851..20fe13082 100644 --- a/priam/src/main/java/com/netflix/priam/scheduler/PriamScheduler.java +++ b/priam/src/main/java/com/netflix/priam/scheduler/PriamScheduler.java @@ -19,15 +19,12 @@ import com.google.inject.Inject; import com.google.inject.Singleton; import com.netflix.priam.utils.Sleeper; +import java.text.ParseException; import org.quartz.*; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.text.ParseException; - -/** - * Scheduling class to schedule Priam tasks. Uses Quartz scheduler - */ +/** Scheduling class to schedule Priam tasks. Uses Quartz scheduler */ @Singleton public class PriamScheduler { private static final Logger logger = LoggerFactory.getLogger(PriamScheduler.class); @@ -47,15 +44,20 @@ public PriamScheduler(SchedulerFactory factory, GuiceJobFactory jobFactory, Slee this.sleeper = sleeper; } - /** - * Add a task to the scheduler - */ - public void addTask(String name, Class taskclass, TaskTimer timer) throws SchedulerException, ParseException { + /** Add a task to the scheduler */ + public void addTask(String name, Class taskclass, TaskTimer timer) + throws SchedulerException, ParseException { assert timer != null : "Cannot add scheduler task " + name + " as no timer is set"; - JobDetail job = JobBuilder.newJob().withIdentity(name, Scheduler.DEFAULT_GROUP).ofType(taskclass).build();//new JobDetail(name, Scheduler.DEFAULT_GROUP, taskclass); + JobDetail job = + JobBuilder.newJob() + .withIdentity(name, Scheduler.DEFAULT_GROUP) + .ofType(taskclass) + .build(); if (timer.getCronExpression() != null && !timer.getCronExpression().isEmpty()) { - logger.info("Scheduled task metadata. Task name: {}" - + ", cron expression: {}", taskclass.getName(), timer.getCronExpression()); + logger.info( + "Scheduled task metadata. Task name: {}" + ", cron expression: {}", + taskclass.getName(), + timer.getCronExpression()); } else { logger.info("Scheduled task metadata. 
Task name: {}", taskclass.getName()); @@ -63,34 +65,61 @@ public void addTask(String name, Class taskclass, TaskTimer time scheduler.scheduleJob(job, timer.getTrigger()); } - /** - * Add a delayed task to the scheduler - */ - public void addTaskWithDelay(final String name, Class taskclass, final TaskTimer timer, final int delayInSeconds) throws SchedulerException, ParseException { + /** Add a delayed task to the scheduler */ + public void addTaskWithDelay( + final String name, + Class taskclass, + final TaskTimer timer, + final int delayInSeconds) { assert timer != null : "Cannot add scheduler task " + name + " as no timer is set"; - final JobDetail job = JobBuilder.newJob().withIdentity(name, Scheduler.DEFAULT_GROUP).ofType(taskclass).build();//new JobDetail(name, Scheduler.DEFAULT_GROUP, taskclass); + final JobDetail job = + JobBuilder.newJob() + .withIdentity(name, Scheduler.DEFAULT_GROUP) + .ofType(taskclass) + .build(); - //we know Priam doesn't do too many new tasks, so this is probably easy/safe/simple - new Thread(new Runnable() { - public void run() { - try { - sleeper.sleepQuietly(delayInSeconds * 1000L); - scheduler.scheduleJob(job, timer.getTrigger()); - } catch (SchedulerException e) { - logger.warn("problem occurred while scheduling a job with name {}", name, e); - } catch (ParseException e) { - logger.warn("problem occurred while parsing a job with name {}", name, e); - } - } - }).start(); + // we know Priam doesn't do too many new tasks, so this is probably easy/safe/simple + new Thread( + () -> { + try { + sleeper.sleepQuietly(delayInSeconds * 1000L); + scheduler.scheduleJob(job, timer.getTrigger()); + } catch (SchedulerException e) { + logger.warn( + "problem occurred while scheduling a job with name {}", + name, + e); + } catch (ParseException e) { + logger.warn( + "problem occurred while parsing a job with name {}", + name, + e); + } + }) + .start(); } public void runTaskNow(Class taskclass) throws Exception { jobFactory.guice.getInstance(taskclass).execute(null); } - public void deleteTask(String name) throws SchedulerException, ParseException { - scheduler.deleteJob(new JobKey(name, Scheduler.DEFAULT_GROUP)); + public void deleteTask(String name) throws SchedulerException { + TriggerKey triggerKey = TriggerKey.triggerKey(name, Scheduler.DEFAULT_GROUP); + + // Check if trigger exists for the job. If there is a trigger, we want to remove those + // trigger. + if (scheduler.checkExists(triggerKey)) { + logger.info("Removing triggers for the job: {}", name); + scheduler.pauseTrigger(triggerKey); + scheduler.unscheduleJob(triggerKey); + } + + // Check if any job exists for the key provided. If yes, we want to delete the job. + JobKey jobKey = JobKey.jobKey(name, Scheduler.DEFAULT_GROUP); + if (scheduler.checkExists(jobKey)) { + logger.info("Removing job from scheduler: {}", name); + scheduler.deleteJob(jobKey); + } } public final Scheduler getScheduler() { diff --git a/priam/src/main/java/com/netflix/priam/scheduler/SchedulerType.java b/priam/src/main/java/com/netflix/priam/scheduler/SchedulerType.java deleted file mode 100644 index 2627c7ec8..000000000 --- a/priam/src/main/java/com/netflix/priam/scheduler/SchedulerType.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Copyright 2013 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -package com.netflix.priam.scheduler; - -import org.apache.commons.lang3.StringUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - - -/** - * Created by aagrawal on 3/8/17. - */ -public enum SchedulerType { - HOUR("HOUR"), CRON("CRON"); - - private static final Logger logger = LoggerFactory.getLogger(SchedulerType.class); - private final String schedulerType; - - SchedulerType(String schedulerType) { - this.schedulerType = schedulerType.toUpperCase(); - } - - /* - * Helper method to find the scheduler type - case insensitive as user may put value which are not right case. - * This returns the ScheulerType if one is found. Refer to table below to understand the use-case. - * - * SchedulerTypeValue|acceptNullorEmpty|acceptIllegalValue|Result - * Valid value |NA |NA |SchedulerType - * Empty string |True |NA |NULL - * NULL |True |NA |NULL - * Empty string |False |NA |UnsupportedTypeException - * NULL |False |NA |UnsupportedTypeException - * Illegal value |NA |True |NULL - * Illegal value |NA |False |UnsupportedTypeException - */ - - public static SchedulerType lookup(String schedulerType, boolean acceptNullOrEmpty, boolean acceptIllegalValue) throws UnsupportedTypeException { - if (StringUtils.isEmpty(schedulerType)) - if (acceptNullOrEmpty) - return null; - else { - String message = String.format("%s is not a supported SchedulerType. Supported values are %s", schedulerType, getSupportedValues()); - logger.error(message); - throw new UnsupportedTypeException(message); - } - - try { - return SchedulerType.valueOf(schedulerType.toUpperCase()); - } catch (IllegalArgumentException ex) { - String message = String.format("%s is not a supported SchedulerType. Supported values are %s", schedulerType, getSupportedValues()); - - if (acceptIllegalValue) { - message = message + ". 
Since acceptIllegalValue is set to True, returning NULL instead."; - logger.error(message); - return null; - } - - logger.error(message); - throw new UnsupportedTypeException(message, ex); - } - } - - private static String getSupportedValues() { - StringBuffer supportedValues = new StringBuffer(); - boolean first = true; - for (SchedulerType type : SchedulerType.values()) { - if (!first) - supportedValues.append(","); - supportedValues.append(type); - first = false; - } - - return supportedValues.toString(); - } - - public static SchedulerType lookup(String schedulerType) throws UnsupportedTypeException { - return lookup(schedulerType, false, false); - } - - public String getSchedulerType() { - return schedulerType; - } - -} diff --git a/priam/src/main/java/com/netflix/priam/scheduler/SimpleTimer.java b/priam/src/main/java/com/netflix/priam/scheduler/SimpleTimer.java index fcecba9bf..4b584d05f 100644 --- a/priam/src/main/java/com/netflix/priam/scheduler/SimpleTimer.java +++ b/priam/src/main/java/com/netflix/priam/scheduler/SimpleTimer.java @@ -16,52 +16,88 @@ */ package com.netflix.priam.scheduler; -import org.quartz.Scheduler; -import org.quartz.SimpleScheduleBuilder; -import org.quartz.Trigger; -import org.quartz.TriggerBuilder; - +import com.google.common.base.Preconditions; import java.text.ParseException; +import java.time.Instant; import java.util.Date; +import org.quartz.*; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * SimpleTimer allows jobs to run starting from specified time occurring at - * regular frequency's. Frequency of the execution timestamp since epoch. + * SimpleTimer allows jobs to run starting from a specified time and recurring at a regular + * frequency. The start time is given as a timestamp since epoch. */ public class SimpleTimer implements TaskTimer { - private Trigger trigger; + private static final Logger logger = LoggerFactory.getLogger(SimpleTimer.class); + private final Trigger trigger; public SimpleTimer(String name, long interval) { - this.trigger = TriggerBuilder.newTrigger() - .withIdentity(name) - .withSchedule(SimpleScheduleBuilder.simpleSchedule().withIntervalInMilliseconds(interval) - .repeatForever().withMisfireHandlingInstructionFireNow()) - .build(); - //.new SimpleTrigger(name, SimpleTrigger.REPEAT_INDEFINITELY, interval); + this.trigger = + TriggerBuilder.newTrigger() + .withIdentity(name) + .withSchedule( + SimpleScheduleBuilder.simpleSchedule() + .withIntervalInMilliseconds(interval) + .repeatForever() + .withMisfireHandlingInstructionFireNow()) + .build(); + } + + /** Run forever every @period seconds starting at @start */ + public SimpleTimer(String name, int period, Instant start) { + Preconditions.checkArgument(period > 0); + Preconditions.checkArgument(start.compareTo(Instant.EPOCH) >= 0); + this.trigger = + TriggerBuilder.newTrigger() + .withIdentity(name) + .withSchedule( + CalendarIntervalScheduleBuilder.calendarIntervalSchedule() + .withMisfireHandlingInstructionFireAndProceed() + .withIntervalInSeconds(period)) + .startAt(Date.from(start)) + .build(); } - /** - * Run once at given time...
*/ public SimpleTimer(String name, String group, long startTime) { - this.trigger = TriggerBuilder.newTrigger() - .withIdentity(name, group) - .withSchedule(SimpleScheduleBuilder.simpleSchedule().withMisfireHandlingInstructionFireNow()) - .startAt(new Date(startTime)) - .build(); - //new SimpleTrigger(name, group, new Date(startTime)); + this.trigger = + TriggerBuilder.newTrigger() + .withIdentity(name, group) + .withSchedule( + SimpleScheduleBuilder.simpleSchedule() + .withMisfireHandlingInstructionFireNow()) + .startAt(new Date(startTime)) + .build(); } - /** - * Run immediatly and dont do that again. - */ + /** Run immediately and don't do that again. */ public SimpleTimer(String name) { - this.trigger = TriggerBuilder.newTrigger() - .withIdentity(name, Scheduler.DEFAULT_GROUP) - .withSchedule(SimpleScheduleBuilder.simpleSchedule().withMisfireHandlingInstructionFireNow()) - .startNow() - .build(); - //new SimpleTrigger(name, Scheduler.DEFAULT_GROUP); + this.trigger = + TriggerBuilder.newTrigger() + .withIdentity(name, Scheduler.DEFAULT_GROUP) + .withSchedule( + SimpleScheduleBuilder.simpleSchedule() + .withMisfireHandlingInstructionFireNow()) + .startNow() + .build(); + } + + public static SimpleTimer getSimpleTimer(final String jobName, final long interval) + throws IllegalArgumentException { + SimpleTimer simpleTimer = null; + + if (interval <= 0) { + logger.info( + "Skipping {} as it is disabled via setting {} to {}.", + jobName, + jobName, + interval); + } else { + simpleTimer = new SimpleTimer(jobName, interval); + logger.info("Starting {} with interval of {}", jobName, interval); + } + return simpleTimer; } public Trigger getTrigger() throws ParseException { diff --git a/priam/src/main/java/com/netflix/priam/scheduler/Task.java b/priam/src/main/java/com/netflix/priam/scheduler/Task.java index a8e9d6c80..f986544f1 100644 --- a/priam/src/main/java/com/netflix/priam/scheduler/Task.java +++ b/priam/src/main/java/com/netflix/priam/scheduler/Task.java @@ -16,30 +16,29 @@ */ package com.netflix.priam.scheduler; -import com.google.common.base.Throwables; import com.netflix.priam.config.IConfiguration; +import java.util.concurrent.atomic.AtomicInteger; import org.quartz.Job; import org.quartz.JobExecutionContext; import org.quartz.JobExecutionException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import javax.management.MBeanServer; -import javax.management.ObjectName; -import java.lang.management.ManagementFactory; -import java.util.concurrent.atomic.AtomicInteger; - /** - * Task class that should be implemented by all cron tasks. Jobconf will contain - * any instance specific data + * Task class that should be implemented by all cron tasks. Jobconf will contain any instance + * specific data * - * NOTE: Constructor must not throw any exception. This will cause Quartz to set the job to failure + *

NOTE: Constructor must not throw any exception. Throwing one will cause Quartz to mark the + * job as failed */ -public abstract class Task implements Job, TaskMBean { +public abstract class Task implements Job { public STATE status = STATE.DONE; public enum STATE { - ERROR, RUNNING, DONE, NOT_APPLICABLE + ERROR, + RUNNING, + DONE, + NOT_APPLICABLE } protected final IConfiguration config; @@ -49,53 +48,31 @@ public enum STATE { private final AtomicInteger executions = new AtomicInteger(); protected Task(IConfiguration config) { - this(config, ManagementFactory.getPlatformMBeanServer()); - } - - protected Task(IConfiguration config, MBeanServer mBeanServer) { this.config = config; - // TODO: don't do mbean registration here - String mbeanName = "com.priam.scheduler:type=" + this.getClass().getName(); - try { - mBeanServer.registerMBean(this, new ObjectName(mbeanName)); - initialize(); - } catch (Exception e) { - throw Throwables.propagate(e); - } } - - /** - * This method has to be implemented and cannot thow any exception. - */ + /** This method has to be implemented and cannot throw any exception. */ public void initialize() throws ExecutionException { // nothing to initialize } public abstract void execute() throws Exception; - /** - * Main method to execute a task - */ + /** Main method to execute a task */ public void execute(JobExecutionContext context) throws JobExecutionException { executions.incrementAndGet(); try { - if (status == STATE.RUNNING) - return; + if (status == STATE.RUNNING) return; status = STATE.RUNNING; execute(); - } catch (Exception e) { - status = STATE.ERROR; - logger.error("Couldnt execute the task because of {}", e.getMessage(), e); - errors.incrementAndGet(); } catch (Throwable e) { status = STATE.ERROR; - logger.error("Couldnt execute the task because of {}", e.getMessage(), e); + logger.error("Could not execute the task: {} because of {}", getName(), e.getMessage(), e); errors.incrementAndGet(); } - if (status != STATE.ERROR) - status = STATE.DONE; + if (status != STATE.ERROR) status = STATE.DONE; } public STATE state() { @@ -111,5 +88,4 @@ public int getExecutionCount() { } public abstract String getName(); - -} \ No newline at end of file +} diff --git a/priam/src/main/java/com/netflix/priam/scheduler/TaskMBean.java b/priam/src/main/java/com/netflix/priam/scheduler/TaskMBean.java deleted file mode 100644 index e25666867..000000000 --- a/priam/src/main/java/com/netflix/priam/scheduler/TaskMBean.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright 2013 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -package com.netflix.priam.scheduler; - -/** - * MBean to monitor Task executions.
- * - */ -public interface TaskMBean { - int getErrorCount(); - - int getExecutionCount(); - - String getName(); -} diff --git a/priam/src/main/java/com/netflix/priam/scheduler/TaskTimer.java b/priam/src/main/java/com/netflix/priam/scheduler/TaskTimer.java index 052a112ce..5d790293f 100644 --- a/priam/src/main/java/com/netflix/priam/scheduler/TaskTimer.java +++ b/priam/src/main/java/com/netflix/priam/scheduler/TaskTimer.java @@ -16,13 +16,10 @@ */ package com.netflix.priam.scheduler; -import org.quartz.Trigger; - import java.text.ParseException; +import org.quartz.Trigger; -/** - * Interface to represent time/interval - */ +/** Interface to represent time/interval */ public interface TaskTimer { Trigger getTrigger() throws ParseException; diff --git a/priam/src/main/java/com/netflix/priam/scheduler/UnsupportedTypeException.java b/priam/src/main/java/com/netflix/priam/scheduler/UnsupportedTypeException.java index 06737e162..36385e4c1 100644 --- a/priam/src/main/java/com/netflix/priam/scheduler/UnsupportedTypeException.java +++ b/priam/src/main/java/com/netflix/priam/scheduler/UnsupportedTypeException.java @@ -1,24 +1,19 @@ /** * Copyright 2017 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and + * + *

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + *

http://www.apache.org/licenses/LICENSE-2.0 + * + *

Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and * limitations under the License. */ - package com.netflix.priam.scheduler; -/** - * Created by aagrawal on 3/14/17. - */ +/** Created by aagrawal on 3/14/17. */ public class UnsupportedTypeException extends Exception { public UnsupportedTypeException(String msg, Throwable th) { super(msg, th); diff --git a/priam/src/main/java/com/netflix/priam/services/SnapshotMetaService.java b/priam/src/main/java/com/netflix/priam/services/SnapshotMetaService.java deleted file mode 100644 index 3e75f7fda..000000000 --- a/priam/src/main/java/com/netflix/priam/services/SnapshotMetaService.java +++ /dev/null @@ -1,249 +0,0 @@ -/** - * Copyright 2018 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.netflix.priam.services; - -import com.google.inject.Provider; -import com.netflix.priam.config.IConfiguration; -import com.netflix.priam.backup.AbstractBackup; -import com.netflix.priam.backup.AbstractBackupPath; -import com.netflix.priam.backup.BackupRestoreUtil; -import com.netflix.priam.backup.IFileSystemContext; -import com.netflix.priam.backupv2.*; -import com.netflix.priam.config.IBackupRestoreConfig; -import com.netflix.priam.defaultimpl.CassandraOperations; -import com.netflix.priam.scheduler.CronTimer; -import com.netflix.priam.scheduler.TaskTimer; -import com.netflix.priam.utils.CassandraMonitor; -import com.netflix.priam.utils.DateUtil; -import org.apache.commons.io.FileUtils; -import org.apache.commons.io.filefilter.FileFilterUtils; -import org.apache.commons.lang3.StringUtils; -import org.quartz.CronExpression; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.inject.Inject; -import javax.inject.Singleton; -import java.io.File; -import java.time.Instant; -import java.util.*; - -/** - * This service will run on CRON as specified by {@link IBackupRestoreConfig#getSnapshotMetaServiceCronExpression()} - * The intent of this service is to run a full snapshot on Cassandra, get the list of the SSTables on disk - * and then create a manifest.json file which will encapsulate the list of the files i.e. capture filesystem at a moment - * in time. - * This manifest.json file will ensure the true filesystem status is exposed (for external entities) and will be - * used in future for Priam Backup Version 2 where a file is not uploaded to backup file system unless SSTable has - * been modified. This will lead to huge reduction in storage costs and provide bandwidth back to Cassandra instead - * of creating/uploading snapshots. - * Note that this component will "try" to enqueue the files to upload, but no guarantee is provided. If the enqueue fails - * for any reason, it is considered "OK" as there will be another service pushing all the files in the queue for upload - * (think of this like a cleanup thread and will help us in "resuming" any failed backup for any reason). - * Created by aagrawal on 6/18/18. 
- */ -@Singleton -public class SnapshotMetaService extends AbstractBackup { - public static final String JOBNAME = "SnapshotMetaService"; - - private static final Logger logger = LoggerFactory.getLogger(SnapshotMetaService.class); - private static final String SNAPSHOT_PREFIX = "snap_v2_"; - private static final String CASSANDRA_MANIFEST_FILE = "manifest.json"; - private BackupRestoreUtil backupRestoreUtil; - private MetaFileWriterBuilder metaFileWriter; - private MetaFileWriterBuilder.DataStep dataStep; - private MetaFileManager metaFileManager; - private CassandraOperations cassandraOperations; - private String snapshotName = null; - - @Inject - SnapshotMetaService(IConfiguration config, IFileSystemContext backupFileSystemCtx, Provider pathFactory, - MetaFileWriterBuilder metaFileWriter, MetaFileManager metaFileManager, CassandraOperations cassandraOperations) { - super(config, backupFileSystemCtx, pathFactory); - this.cassandraOperations = cassandraOperations; - backupRestoreUtil = new BackupRestoreUtil(config.getSnapshotKeyspaceFilters(), config.getSnapshotCFFilter()); - this.metaFileWriter = metaFileWriter; - this.metaFileManager = metaFileManager; - } - - /** - * Interval between generating snapshot meta file using {@link com.netflix.priam.services.SnapshotMetaService}. - * - * @param backupRestoreConfig {@link IBackupRestoreConfig#getSnapshotMetaServiceCronExpression()} to get configuration details from priam. Use "-1" to disable the service. - * @return the timer to be used for snapshot meta service. - * @throws Exception if the configuration is not set correctly or are not valid. This is to ensure we fail-fast. - **/ - public static TaskTimer getTimer(IBackupRestoreConfig backupRestoreConfig) throws Exception { - CronTimer cronTimer = null; - String cronExpression = backupRestoreConfig.getSnapshotMetaServiceCronExpression(); - - if (!StringUtils.isEmpty(cronExpression) && cronExpression.equalsIgnoreCase("-1")) { - logger.info("Skipping SnapshotMetaService as SnapshotMetaService cron is disabled via -1."); - } else { - if (StringUtils.isEmpty(cronExpression) || !CronExpression.isValidExpression(cronExpression)) - throw new Exception("Invalid CRON expression: " + cronExpression + - ". Please use -1, if you wish to disable SnapshotMetaService else fix the CRON expression and try again!"); - - cronTimer = new CronTimer(JOBNAME, cronExpression); - logger.info("Starting SnapshotMetaService with CRON expression {}", cronTimer.getCronExpression()); - } - return cronTimer; - } - - String generateSnapshotName(Instant snapshotInstant) { - return SNAPSHOT_PREFIX + DateUtil.formatInstant(DateUtil.yyyyMMddHHmm, snapshotInstant); - } - - @Override - public void execute() throws Exception { - if (!CassandraMonitor.hasCassadraStarted()) { - logger.debug("Cassandra has not started, hence SnapshotMetaService will not run"); - return; - } - - try { - Instant snapshotInstant = DateUtil.getInstant(); - snapshotName = generateSnapshotName(snapshotInstant); - logger.info("Initializing SnapshotMetaService for taking a snapshot {}", snapshotName); - - //Perform a cleanup of old snapshot meta_v2.json files, if any, as we don't want our disk to be filled by them. - //These files may be leftover - // 1) when Priam shutdown in middle of this service and may not be full JSON - // 2) No permission to upload to backup file system. - metaFileManager.cleanupOldMetaFiles(); - - //TODO: enqueue all the old backup folder for upload/delete, if any, as we don't want our disk to be filled by them. 
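The deleted getTimer above, like the surviving CronTimer.getCronTimer earlier in this diff, follows one convention: a cron value of "-1" disables the job, and any other value must be a valid Quartz cron expression or the service fails fast. A compact sketch of that validate-or-disable pattern, using Quartz's real CronExpression.isValidExpression; the helper class itself is hypothetical:

import org.quartz.CronExpression;

/** Sketch of the "-1 disables, otherwise must be a valid Quartz cron" convention used above. */
final class CronConfigs {
    private CronConfigs() {}

    /** Returns null when the job is disabled, otherwise the validated expression. */
    static String validateOrDisable(String jobName, String cronExpression) {
        if ("-1".equalsIgnoreCase(cronExpression)) {
            return null; // job explicitly disabled by configuration
        }
        if (cronExpression == null || !CronExpression.isValidExpression(cronExpression)) {
            throw new IllegalArgumentException(
                    "Invalid CRON expression for " + jobName + ": " + cronExpression);
        }
        return cronExpression;
    }
}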
- //processOldSnapshotV2Folders(); - - //Take a new snapshot - cassandraOperations.takeSnapshot(snapshotName); - - //Process the snapshot and upload the meta file. - processSnapshot(snapshotInstant).uploadMetaFile(true); - - logger.info("Finished processing snapshot meta service"); - } catch (Exception e) { - logger.error("Error while executing SnapshotMetaService", e); - } - - } - - MetaFileWriterBuilder.UploadStep processSnapshot(Instant snapshotInstant) throws Exception { - dataStep = metaFileWriter.newBuilder().startMetaFileGeneration(snapshotInstant); - initiateBackup(SNAPSHOT_FOLDER, backupRestoreUtil); - return dataStep.endMetaFileGeneration(); - } - - private File getValidSnapshot(File snapshotDir, String snapshotName) { - for (File fileName : snapshotDir.listFiles()) - if (fileName.exists() && fileName.isDirectory() && fileName.getName().matches(snapshotName)) - return fileName; - return null; - } - - @Override - public String getName() { - return JOBNAME; - } - - - private ColumnfamilyResult convertToColumnFamilyResult(String keyspace, String columnFamilyName, Map> filePrefixToFileMap) { - ColumnfamilyResult columnfamilyResult = new ColumnfamilyResult(keyspace, columnFamilyName); - filePrefixToFileMap.entrySet().forEach(sstableEntry -> { - ColumnfamilyResult.SSTableResult ssTableResult = new ColumnfamilyResult.SSTableResult(); - ssTableResult.setPrefix(sstableEntry.getKey()); - ssTableResult.setSstableComponents(sstableEntry.getValue()); - columnfamilyResult.addSstable(ssTableResult); - }); - return columnfamilyResult; - } - - @Override - protected void processColumnFamily(final String keyspace, final String columnFamily, final File backupDir) throws Exception { - File snapshotDir = getValidSnapshot(backupDir, snapshotName); - // Process this snapshot folder for the given columnFamily - if (snapshotDir == null) { - logger.warn("{} folder does not contain {} snapshots", backupDir, snapshotName); - return; - } - - logger.debug("Scanning for all SSTables in: {}", snapshotDir.getAbsolutePath()); - - Map> filePrefixToFileMap = new HashMap<>(); - Collection files = FileUtils.listFiles(snapshotDir, FileFilterUtils.fileFileFilter(), null); - - for (File file: files){ - if (!file.exists()) - continue; - - try { - String prefix = PrefixGenerator.getSSTFileBase(file.getName()); - - if (prefix == null && file.getName().equalsIgnoreCase(CASSANDRA_MANIFEST_FILE)) - prefix = "manifest"; - - if (prefix == null) { - logger.error("Unknown file type with no SSTFileBase found: ", file.getAbsolutePath()); - return; - } - - FileUploadResult fileUploadResult = FileUploadResult.getFileUploadResult(keyspace, columnFamily, file); - filePrefixToFileMap.putIfAbsent(prefix, new ArrayList<>()); - filePrefixToFileMap.get(prefix).add(fileUploadResult); - } catch (Exception e) { - /* If you are here it means either of the issues. In that case, do not upload the meta file. - * @throws UnsupportedOperationException - * if an attributes of the given type are not supported - * @throws IOException - * if an I/O error occurs - * @throws SecurityException - * In the case of the default provider, a security manager is - * installed, its {@link SecurityManager#checkRead(String) checkRead} - * method is invoked to check read access to the file. If this - * method is invoked to read security sensitive attributes then the - * security manager may be invoke to check for additional permissions. 
- */ - logger.error("Internal error while trying to generate FileUploadResult and/or reading FileAttributes for file: " + file.getAbsolutePath(), e); - throw e; - } - } - - ColumnfamilyResult columnfamilyResult = convertToColumnFamilyResult(keyspace, columnFamily, filePrefixToFileMap); - filePrefixToFileMap.clear(); //Release the resources. - - logger.debug("Starting the processing of KS: {}, CF: {}, No.of SSTables: {}", columnfamilyResult.getKeyspaceName(), columnfamilyResult.getColumnfamilyName(), columnfamilyResult.getSstables().size()); - - //TODO: Future - Ensure that all the files are en-queued for Upload. Use BackupCacheService (BCS) to find the - //location where files are uploaded and BackupUploadDownloadService(BUDS) to enque if they are not. - //Note that BUDS will be responsible for actually deleting the files after they are processed as they really should not be deleted unless they are successfully uploaded. - FileUtils.cleanDirectory(snapshotDir); - FileUtils.deleteDirectory(snapshotDir); - - dataStep.addColumnfamilyResult(columnfamilyResult); - logger.debug("Finished processing KS: {}, CF: {}", columnfamilyResult.getKeyspaceName(), columnfamilyResult.getColumnfamilyName()); - - } - - @Override - protected void addToRemotePath(String remotePath) { - //Do nothing - } - - //For testing purposes only. - void setSnapshotName(String snapshotName) { - this.snapshotName = snapshotName; - } -} diff --git a/priam/src/main/java/com/netflix/priam/tuner/CassandraTunerService.java b/priam/src/main/java/com/netflix/priam/tuner/CassandraTunerService.java new file mode 100644 index 000000000..ab9e6e611 --- /dev/null +++ b/priam/src/main/java/com/netflix/priam/tuner/CassandraTunerService.java @@ -0,0 +1,49 @@ +package com.netflix.priam.tuner; + +import com.netflix.priam.backup.IncrementalBackup; +import com.netflix.priam.config.IBackupRestoreConfig; +import com.netflix.priam.config.IConfiguration; +import com.netflix.priam.connection.JMXNodeTool; +import com.netflix.priam.defaultimpl.IService; +import com.netflix.priam.scheduler.PriamScheduler; +import com.netflix.priam.utils.RetryableCallable; +import javax.inject.Inject; + +public class CassandraTunerService implements IService { + private final PriamScheduler scheduler; + private final IConfiguration configuration; + private final IBackupRestoreConfig backupRestoreConfig; + + @Inject + public CassandraTunerService( + PriamScheduler priamScheduler, + IConfiguration configuration, + IBackupRestoreConfig backupRestoreConfig) { + this.scheduler = priamScheduler; + this.configuration = configuration; + this.backupRestoreConfig = backupRestoreConfig; + } + + @Override + public void scheduleService() throws Exception { + // Run the task to tune Cassandra + scheduler.runTaskNow(TuneCassandra.class); + } + + @Override + public void updateServicePre() throws Exception {} + + @Override + public void updateServicePost() throws Exception { + // Update the cassandra to enable/disable new incremental files. 
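The RetryableCallable(6, 10000) usage that follows wraps the JMX call so transient connection failures are retried with a fixed pause. The real implementation is not part of this diff; a minimal sketch of the retry contract that its (retries, waitMillis) constructor and retriableCall() override imply, with invented names:

import java.util.concurrent.Callable;

/** Sketch of the retry-with-fixed-delay contract implied by new RetryableCallable(6, 10000). */
abstract class FixedDelayRetryable<T> implements Callable<T> {
    private final int retries;
    private final long waitMillis;

    FixedDelayRetryable(int retries, long waitMillis) {
        this.retries = retries;
        this.waitMillis = waitMillis;
    }

    /** The single attempt; subclasses put the fallible work here. */
    public abstract T retriableCall() throws Exception;

    @Override
    public T call() throws Exception {
        Exception last = new IllegalStateException("retries must be > 0");
        for (int attempt = 1; attempt <= retries; attempt++) {
            try {
                return retriableCall();
            } catch (Exception e) {
                last = e; // remember the failure and pause before the next attempt
                if (attempt < retries) {
                    Thread.sleep(waitMillis);
                }
            }
        }
        throw last;
    }
}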
+ new RetryableCallable(6, 10000) { + public Void retriableCall() throws Exception { + try (JMXNodeTool nodeTool = JMXNodeTool.instance(configuration)) { + nodeTool.setIncrementalBackupsEnabled( + IncrementalBackup.isEnabled(configuration, backupRestoreConfig)); + } + return null; + } + }.call(); + } +} diff --git a/priam/src/main/java/com/netflix/priam/tuner/GCTuner.java b/priam/src/main/java/com/netflix/priam/tuner/GCTuner.java index 9f8812cf2..c3f174b13 100644 --- a/priam/src/main/java/com/netflix/priam/tuner/GCTuner.java +++ b/priam/src/main/java/com/netflix/priam/tuner/GCTuner.java @@ -17,89 +17,88 @@ package com.netflix.priam.tuner; -import javax.inject.Singleton; import java.util.Arrays; import java.util.HashSet; import java.util.Set; +import javax.inject.Singleton; /** - * List of Garbage collection parameters for CMS/G1GC. This list is used to automatically enable/disable configurations, if found in jvm.options. - * Created by aagrawal on 8/23/17. + * List of Garbage collection parameters for CMS/G1GC. This list is used to automatically + * enable/disable configurations, if found in jvm.options. Created by aagrawal on 8/23/17. */ @Singleton public class GCTuner { -// private Set commonOptions = new HashSet<>(Arrays.asList( -// "-XX:ParallelGCThreads", -// "-XX:ConcGCThreads", -// "-XX:+ParallelRefProcEnabled", -// "-XX:+AlwaysPreTouch", -// "-XX:+UseTLAB", -// "-XX:+ResizeTLAB", -// "-XX:-UseBiasedLocking" -// )); + // private Set commonOptions = new HashSet<>(Arrays.asList( + // "-XX:ParallelGCThreads", + // "-XX:ConcGCThreads", + // "-XX:+ParallelRefProcEnabled", + // "-XX:+AlwaysPreTouch", + // "-XX:+UseTLAB", + // "-XX:+ResizeTLAB", + // "-XX:-UseBiasedLocking" + // )); - private static final Set cmsOptions = new HashSet<>(Arrays.asList( - "-XX:+UseConcMarkSweepGC", - "-XX:+UseParNewGC", - "-XX:+UseParallelGC", - "-XX:+CMSConcurrentMTEnabled", - "-XX:CMSInitiatingOccupancyFraction", - "-XX:+UseCMSInitiatingOccupancyOnly", - "-XX:+CMSClassUnloadingEnabled", - "-XX:+CMSIncrementalMode", - "-XX:+CMSPermGenSweepingEnabled", - "-XX:+ExplicitGCInvokesConcurrent", - "-XX:+ExplicitGCInvokesConcurrentAndUnloadsClasses", - "-XX:+DisableExplicitGC", - "-XX:+CMSParallelRemarkEnabled", - "-XX:SurvivorRatio", - "-XX:MaxTenuringThreshold", - "-XX:CMSWaitDuration", - "-XX:+CMSParallelInitialMarkEnabled", - "-XX:+CMSEdenChunksRecordAlways" - )); + private static final Set cmsOptions = + new HashSet<>( + Arrays.asList( + "-XX:+UseConcMarkSweepGC", + "-XX:+UseParNewGC", + "-XX:+UseParallelGC", + "-XX:+CMSConcurrentMTEnabled", + "-XX:CMSInitiatingOccupancyFraction", + "-XX:+UseCMSInitiatingOccupancyOnly", + "-XX:+CMSClassUnloadingEnabled", + "-XX:+CMSIncrementalMode", + "-XX:+CMSPermGenSweepingEnabled", + "-XX:+ExplicitGCInvokesConcurrent", + "-XX:+ExplicitGCInvokesConcurrentAndUnloadsClasses", + "-XX:+DisableExplicitGC", + "-XX:+CMSParallelRemarkEnabled", + "-XX:SurvivorRatio", + "-XX:MaxTenuringThreshold", + "-XX:CMSWaitDuration", + "-XX:+CMSParallelInitialMarkEnabled", + "-XX:+CMSEdenChunksRecordAlways")); - private static final Set g1gcOptions = new HashSet<>(Arrays.asList( - "-XX:+UseG1GC", - "-XX:G1HeapRegionSize", - "-XX:MaxGCPauseMillis", - "-XX:G1NewSizePercent", - "-XX:G1MaxNewSizePercent", - "-XX:-ResizePLAB", - "-XX:InitiatingHeapOccupancyPercent", - "-XX:G1MixedGCLiveThresholdPercent", - "-XX:G1HeapWastePercent", - "-XX:G1MixedGCCountTarget", - "-XX:G1OldCSetRegionThresholdPercent", - "-XX:G1ReservePercent", - "-XX:SoftRefLRUPolicyMSPerMB", - "-XX:G1ConcRefinementThreads", - 
"-XX:MaxGCPauseMillis", - "-XX:+UnlockExperimentalVMOptions", - "-XX:NewRatio", - "-XX:G1RSetUpdatingPauseTimePercent" - )); + private static final Set g1gcOptions = + new HashSet<>( + Arrays.asList( + "-XX:+UseG1GC", + "-XX:G1HeapRegionSize", + "-XX:MaxGCPauseMillis", + "-XX:G1NewSizePercent", + "-XX:G1MaxNewSizePercent", + "-XX:-ResizePLAB", + "-XX:InitiatingHeapOccupancyPercent", + "-XX:G1MixedGCLiveThresholdPercent", + "-XX:G1HeapWastePercent", + "-XX:G1MixedGCCountTarget", + "-XX:G1OldCSetRegionThresholdPercent", + "-XX:G1ReservePercent", + "-XX:SoftRefLRUPolicyMSPerMB", + "-XX:G1ConcRefinementThreads", + "-XX:MaxGCPauseMillis", + "-XX:+UnlockExperimentalVMOptions", + "-XX:NewRatio", + "-XX:G1RSetUpdatingPauseTimePercent")); - final static GCType getGCType(String option) { - if (cmsOptions.contains(option)) - return GCType.CMS; + static final GCType getGCType(String option) { + if (cmsOptions.contains(option)) return GCType.CMS; - if (g1gcOptions.contains(option)) - return GCType.G1GC; + if (g1gcOptions.contains(option)) return GCType.G1GC; return null; } - final static GCType getGCType(JVMOption jvmOption) - { + static final GCType getGCType(JVMOption jvmOption) { return getGCType(jvmOption.getJvmOption()); } - public Set getCmsOptions(){ + public Set getCmsOptions() { return cmsOptions; } - public Set getG1gcOptions(){ + public Set getG1gcOptions() { return g1gcOptions; } } diff --git a/priam/src/main/java/com/netflix/priam/tuner/GCType.java b/priam/src/main/java/com/netflix/priam/tuner/GCType.java index acc57bda4..bccd687e7 100644 --- a/priam/src/main/java/com/netflix/priam/tuner/GCType.java +++ b/priam/src/main/java/com/netflix/priam/tuner/GCType.java @@ -23,17 +23,17 @@ import org.slf4j.LoggerFactory; /** - * Garbage collection types supported by Priam for Cassandra (CMS/G1GC). - * Created by aagrawal on 8/24/17. + * Garbage collection types supported by Priam for Cassandra (CMS/G1GC). Created by aagrawal on + * 8/24/17. */ public enum GCType { - CMS("CMS"),G1GC("G1GC"); + CMS("CMS"), + G1GC("G1GC"); private static final Logger logger = LoggerFactory.getLogger(GCType.class); private final String gcType; - GCType(String gcType) - { + GCType(String gcType) { this.gcType = gcType.toUpperCase(); } @@ -51,25 +51,32 @@ public enum GCType { * Illegal value |NA |False |UnsupportedTypeException */ - public static GCType lookup(String gcType, boolean acceptNullOrEmpty, boolean acceptIllegalValue) throws UnsupportedTypeException { + public static GCType lookup( + String gcType, boolean acceptNullOrEmpty, boolean acceptIllegalValue) + throws UnsupportedTypeException { if (StringUtils.isEmpty(gcType)) - if(acceptNullOrEmpty) - return null; - else - { - String message = String.format("%s is not a supported GC Type. Supported values are %s", gcType, getSupportedValues()); + if (acceptNullOrEmpty) return null; + else { + String message = + String.format( + "%s is not a supported GC Type. Supported values are %s", + gcType, getSupportedValues()); logger.error(message); throw new UnsupportedTypeException(message); } - try{ + try { return GCType.valueOf(gcType.toUpperCase()); - }catch (IllegalArgumentException ex) - { - String message = String.format("%s is not a supported GCType. Supported values are %s", gcType, getSupportedValues()); + } catch (IllegalArgumentException ex) { + String message = + String.format( + "%s is not a supported GCType. Supported values are %s", + gcType, getSupportedValues()); if (acceptIllegalValue) { - message = message + ". 
Since acceptIllegalValue is set to True, returning NULL instead."; + message = + message + + ". Since acceptIllegalValue is set to True, returning NULL instead."; logger.error(message); return null; } @@ -79,13 +86,11 @@ public static GCType lookup(String gcType, boolean acceptNullOrEmpty, boolean ac } } - private static String getSupportedValues() - { - StringBuffer supportedValues = new StringBuffer(); + private static String getSupportedValues() { + StringBuilder supportedValues = new StringBuilder(); boolean first = true; for (GCType type : GCType.values()) { - if (!first) - supportedValues.append(","); + if (!first) supportedValues.append(","); supportedValues.append(type); first = false; } @@ -93,14 +98,11 @@ private static String getSupportedValues() return supportedValues.toString(); } - public static GCType lookup(String gcType) throws UnsupportedTypeException - { + public static GCType lookup(String gcType) throws UnsupportedTypeException { return lookup(gcType, false, false); } - public String getGcType() - { + public String getGcType() { return gcType; } - } diff --git a/priam/src/main/java/com/netflix/priam/tuner/ICassandraTuner.java b/priam/src/main/java/com/netflix/priam/tuner/ICassandraTuner.java index 69a88d522..9af05c409 100644 --- a/priam/src/main/java/com/netflix/priam/tuner/ICassandraTuner.java +++ b/priam/src/main/java/com/netflix/priam/tuner/ICassandraTuner.java @@ -1,28 +1,25 @@ /** * Copyright 2017 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
+ *
+ * <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * <p>http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * <p>
Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.priam.tuner; import com.google.inject.ImplementedBy; - import java.io.IOException; @ImplementedBy(StandardTuner.class) -public interface ICassandraTuner -{ - void writeAllProperties(String yamlLocation, String hostname, String seedProvider) throws Exception; +public interface ICassandraTuner { + void writeAllProperties(String yamlLocation, String hostname, String seedProvider) + throws Exception; void updateAutoBootstrap(String yamlLocation, boolean autobootstrap) throws IOException; diff --git a/priam/src/main/java/com/netflix/priam/tuner/JVMOption.java b/priam/src/main/java/com/netflix/priam/tuner/JVMOption.java index 3cf54bc1e..a8c578d33 100644 --- a/priam/src/main/java/com/netflix/priam/tuner/JVMOption.java +++ b/priam/src/main/java/com/netflix/priam/tuner/JVMOption.java @@ -16,24 +16,22 @@ */ package com.netflix.priam.tuner; -import org.apache.commons.lang3.StringUtils; - import java.util.Objects; import java.util.regex.Matcher; import java.util.regex.Pattern; +import org.apache.commons.lang3.StringUtils; -/** - * POJO to parse and store the JVM option from jvm.options file. - * Created by aagrawal on 8/28/17. - */ +/** POJO to parse and store the JVM option from jvm.options file. Created by aagrawal on 8/28/17. */ public class JVMOption { private String jvmOption; private String value; private boolean isCommented; private boolean isHeapJVMOption; private static final Pattern pattern = Pattern.compile("(#)*(-[^=]+)=?(.*)?"); - //A new pattern is required because heap do not separate JVM key,value with "=". - private static final Pattern heapPattern = Pattern.compile("(#)*(-Xm[x|s|n])([0-9]+[K|M|G])?"); //Pattern.compile("(#)*-(Xm[x|s|n])([0-9]+)(K|M|G)?"); + // A new pattern is required because heap do not separate JVM key,value with "=". 
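A quick illustration of the comment above: heap flags such as -Xmx8G carry their value inline, while every other option uses key=value, hence the second pattern. The regexes below are copied verbatim from this hunk; the demo class itself is the editor's:

```java
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class JvmOptionRegexDemo {
    // Regexes copied from the JVMOption hunk above.
    private static final Pattern pattern = Pattern.compile("(#)*(-[^=]+)=?(.*)?");
    private static final Pattern heapPattern =
            Pattern.compile("(#)*(-Xm[x|s|n])([0-9]+[K|M|G])?");

    public static void main(String[] args) {
        // Heap flags carry their value directly: -Xmx8G, not -Xmx=8G.
        Matcher heap = heapPattern.matcher("#-Xmx8G");
        if (heap.matches()) {
            // prints: commented=true option=-Xmx value=8G
            System.out.printf("commented=%b option=%s value=%s%n",
                    heap.group(1) != null, heap.group(2), heap.group(3));
        }

        // Ordinary options separate key and value with "=".
        Matcher kv = pattern.matcher("-XX:CMSWaitDuration=10000");
        if (kv.matches()) {
            // prints: commented=false option=-XX:CMSWaitDuration value=10000
            System.out.printf("commented=%b option=%s value=%s%n",
                    kv.group(1) != null, kv.group(2), kv.group(3));
        }
    }
}
```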
+ private static final Pattern heapPattern = + Pattern.compile( + "(#)*(-Xm[x|s|n])([0-9]+[K|M|G])?"); // Pattern.compile("(#)*-(Xm[x|s|n])([0-9]+)(K|M|G)?"); public JVMOption(String jvmOption) { this.jvmOption = jvmOption; @@ -47,13 +45,11 @@ public JVMOption(String jvmOption, String value, boolean isCommented, boolean is } public String toJVMOptionString() { - final StringBuffer sb = new StringBuffer(); - if (isCommented) - sb.append("#"); + final StringBuilder sb = new StringBuilder(); + if (isCommented) sb.append("#"); sb.append(jvmOption); if (value != null) { - if (!isHeapJVMOption) - sb.append("="); + if (!isHeapJVMOption) sb.append("="); sb.append(value); } return sb.toString(); @@ -64,10 +60,10 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; JVMOption jvmOption1 = (JVMOption) o; - return isCommented == jvmOption1.isCommented && - isHeapJVMOption == jvmOption1.isHeapJVMOption && - Objects.equals(jvmOption, jvmOption1.jvmOption) && - Objects.equals(value, jvmOption1.value); + return isCommented == jvmOption1.isCommented + && isHeapJVMOption == jvmOption1.isHeapJVMOption + && Objects.equals(jvmOption, jvmOption1.jvmOption) + && Objects.equals(value, jvmOption1.value); } @Override @@ -90,8 +86,7 @@ public String getValue() { } public JVMOption setValue(String value) { - if (!StringUtils.isEmpty(value)) - this.value = value; + if (!StringUtils.isEmpty(value)) this.value = value; return this; } @@ -116,18 +111,23 @@ public JVMOption setHeapJVMOption(boolean heapJVMOption) { public static JVMOption parse(String line) { JVMOption result = null; - //See if it is heap JVM option. + // See if it is heap JVM option. Matcher matcher = heapPattern.matcher(line); if (matcher.matches()) { boolean isCommented = (matcher.group(1) != null); - return new JVMOption(matcher.group(2)).setCommented(isCommented).setValue(matcher.group(3)).setHeapJVMOption(true); + return new JVMOption(matcher.group(2)) + .setCommented(isCommented) + .setValue(matcher.group(3)) + .setHeapJVMOption(true); } - //See if other heap option. + // See if other heap option. matcher = pattern.matcher(line); if (matcher.matches()) { boolean isCommented = (matcher.group(1) != null); - return new JVMOption(matcher.group(2)).setCommented(isCommented).setValue(matcher.group(3)); + return new JVMOption(matcher.group(2)) + .setCommented(isCommented) + .setValue(matcher.group(3)); } return result; diff --git a/priam/src/main/java/com/netflix/priam/tuner/JVMOptionsTuner.java b/priam/src/main/java/com/netflix/priam/tuner/JVMOptionsTuner.java index 2998fff80..9f771ad57 100644 --- a/priam/src/main/java/com/netflix/priam/tuner/JVMOptionsTuner.java +++ b/priam/src/main/java/com/netflix/priam/tuner/JVMOptionsTuner.java @@ -19,21 +19,20 @@ import com.google.inject.Inject; import com.netflix.priam.config.IConfiguration; -import org.apache.commons.lang3.StringUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.io.File; import java.nio.file.Files; import java.util.*; import java.util.stream.Collectors; +import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * This is to tune the jvm.options file introduced in Cassandra 3.x to pass JVM parameters to Cassandra. - * It supports configuring GC type (CMS/G1GC) where it automatically activates default properties as provided in - * jvm.options file. Note that this will not "add" any GC options. - *

- * Created by aagrawal on 8/23/17.
+ * This is to tune the jvm.options file introduced in Cassandra 3.x to pass JVM parameters to
+ * Cassandra. It supports configuring GC type (CMS/G1GC) where it automatically activates default
+ * properties as provided in jvm.options file. Note that this will not "add" any GC options.
+ *
+ * <p>
Created by aagrawal on 8/23/17. */ public class JVMOptionsTuner { private static final Logger logger = LoggerFactory.getLogger(JVMOptionsTuner.class); @@ -50,7 +49,8 @@ public JVMOptionsTuner(IConfiguration config) { * configuring GC {@link IConfiguration#getGCType()}etc. * * @param outputFile File name with which this configured JVM options should be written. - * @throws Exception when encountered with invalid configured GC type. {@link IConfiguration#getGCType()} + * @throws Exception when encountered with invalid configured GC type. {@link + * IConfiguration#getGCType()} */ public void updateAndSaveJVMOptions(final String outputFile) throws Exception { List configuredJVMOptions = updateJVMOptions(); @@ -61,51 +61,61 @@ public void updateAndSaveJVMOptions(final String outputFile) throws Exception { logger.info("Updating jvm.options with following values: " + buffer.toString()); } - //Verify we can write to output file and it is not directory. + // Verify we can write to output file and it is not directory. File file = new File(outputFile); if (file.exists() && !file.canWrite()) { throw new Exception("Not enough permissions to write to file: " + outputFile); } - //Write jvm.options back to override defaults. + // Write jvm.options back to override defaults. Files.write(new File(outputFile).toPath(), configuredJVMOptions); } /** - * Update the JVM options file for cassandra by updating/removing JVM options - * {@link IConfiguration#getJVMExcludeSet()} and {@link IConfiguration#getJVMUpsertSet()}, - * configuring GC {@link IConfiguration#getGCType()}etc. + * Update the JVM options file for cassandra by updating/removing JVM options {@link + * IConfiguration#getJVMExcludeSet()} and {@link IConfiguration#getJVMUpsertSet()}, configuring + * GC {@link IConfiguration#getGCType()}etc. * * @return List of Configuration as String after reading the configuration from jvm.options - * @throws Exception when encountered with invalid configured GC type. {@link IConfiguration#getGCType()} + * @throws Exception when encountered with invalid configured GC type. {@link + * IConfiguration#getGCType()} */ protected List updateJVMOptions() throws Exception { File jvmOptionsFile = new File(config.getJVMOptionsFileLocation()); validate(jvmOptionsFile); final GCType configuredGC = config.getGCType(); - final Map excludeSet = config.getJVMExcludeSet(); + final Map excludeSet = + JVMOptionsTuner.parseJVMOptions(config.getJVMExcludeSet()); - //Make a copy of upsertSet, so we can delete the entries as we process them. - Map upsertSet = config.getJVMUpsertSet(); + // Make a copy of upsertSet, so we can delete the entries as we process them. + Map upsertSet = + JVMOptionsTuner.parseJVMOptions(config.getJVMUpsertSet()); - //Don't use streams for processing as upsertSet jvm options needs to be removed if we find them - //already in jvm.options file. - List optionsFromFile = Files.lines(jvmOptionsFile.toPath()).collect(Collectors.toList()); + // Don't use streams for processing as upsertSet jvm options needs to be removed if we find + // them + // already in jvm.options file. + List optionsFromFile = + Files.lines(jvmOptionsFile.toPath()).collect(Collectors.toList()); List configuredOptions = new LinkedList<>(); for (String line : optionsFromFile) { - configuredOptions.add(updateConfigurationValue(line, configuredGC, upsertSet, excludeSet)); + configuredOptions.add( + updateConfigurationValue(line, configuredGC, upsertSet, excludeSet)); } - //Add all the upserts(inserts only left) from config. 
+ // Add all the upserts(inserts only left) from config. if (upsertSet != null && !upsertSet.isEmpty()) { configuredOptions.add("#################"); configuredOptions.add("# USER PROVIDED CUSTOM JVM CONFIGURATIONS #"); configuredOptions.add("#################"); - configuredOptions.addAll(upsertSet.values().stream() - .map(jvmOption -> jvmOption.toJVMOptionString()).collect(Collectors.toList())); + configuredOptions.addAll( + upsertSet + .values() + .stream() + .map(JVMOption::toJVMOptionString) + .collect(Collectors.toList())); } return configuredOptions; @@ -117,27 +127,31 @@ private void setHeapSetting(String configuredValue, JVMOption option) { } /** - * @param line a line as read from jvm.options file. + * @param line a line as read from jvm.options file. * @param configuredGC GCType configured by user for Cassandra. - * @param upsertSet configured upsert set of JVM properties as provided by user for Cassandra. - * @param excludeSet configured exclude set of JVM properties as provided by user for Cassandra. - * @return the "comment" as is, if not a valid JVM option. Else, a string representation of JVM option + * @param upsertSet configured upsert set of JVM properties as provided by user for Cassandra. + * @param excludeSet configured exclude set of JVM properties as provided by user for Cassandra. + * @return the "comment" as is, if not a valid JVM option. Else, a string representation of JVM + * option */ - private String updateConfigurationValue(final String line, GCType configuredGC, Map upsertSet, Map excludeSet) { + private String updateConfigurationValue( + final String line, + GCType configuredGC, + Map upsertSet, + Map excludeSet) { JVMOption option = JVMOption.parse(line); - if (option == null) - return line; + if (option == null) return line; - //Is parameter for heap setting. + // Is parameter for heap setting. if (option.isHeapJVMOption()) { - String configuredValue = null; + String configuredValue; switch (option.getJvmOption()) { - //Special handling for heap new size ("Xmn") + // Special handling for heap new size ("Xmn") case "-Xmn": configuredValue = config.getHeapNewSize(); break; - //Set min and max heap size to same value + // Set min and max heap size to same value default: configuredValue = config.getHeapSize(); break; @@ -172,28 +186,32 @@ private String updateConfigurationValue(final String line, GCType configuredGC, private void validate(File jvmOptionsFile) throws Exception { if (!jvmOptionsFile.exists()) - throw new Exception("JVM Option File does not exist: " + jvmOptionsFile.getAbsolutePath()); + throw new Exception( + "JVM Option File does not exist: " + jvmOptionsFile.getAbsolutePath()); if (jvmOptionsFile.isDirectory()) - throw new Exception("JVM Option File is a directory: " + jvmOptionsFile.getAbsolutePath()); + throw new Exception( + "JVM Option File is a directory: " + jvmOptionsFile.getAbsolutePath()); if (!jvmOptionsFile.canRead() || !jvmOptionsFile.canWrite()) - throw new Exception("JVM Option File does not have right permission: " + jvmOptionsFile.getAbsolutePath()); - + throw new Exception( + "JVM Option File does not have right permission: " + + jvmOptionsFile.getAbsolutePath()); } /** - * Util function to parse comma separated list of jvm options to a Map (jvmOptionName, JVMOption). - * It will ignore anything which is not a valid JVM option. + * Util function to parse comma separated list of jvm options to a Map (jvmOptionName, + * JVMOption). It will ignore anything which is not a valid JVM option. 
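A usage sketch for the parser documented here (editor's example; the input string is invented, and imports from com.netflix.priam.tuner are implied):

```java
import java.util.Map;

public class ParseJVMOptionsDemo {
    public static void main(String[] args) {
        // Invented input in the comma-separated format described above.
        Map<String, JVMOption> opts =
                JVMOptionsTuner.parseJVMOptions("-Xmx8G,-XX:+UseG1GC,not-an-option");

        // "not-an-option" fails both regexes and is silently dropped; the
        // rest are keyed by option name: [-Xmx, -XX:+UseG1GC]
        System.out.println(opts.keySet());
    }
}
```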
* * @param property comma separated list of JVM options. * @return Map of (jvmOptionName, JVMOption). */ public static final Map parseJVMOptions(String property) { - if (StringUtils.isEmpty(property)) - return null; - return new HashSet(Arrays.asList(property.split(","))).stream() - .map(line -> JVMOption.parse(line)).filter(jvmOption -> jvmOption != null).collect(Collectors.toMap(jvmOption -> jvmOption.getJvmOption(), jvmOption -> jvmOption)); + if (StringUtils.isEmpty(property)) return null; + return new HashSet<>(Arrays.asList(property.split(","))) + .stream() + .map(JVMOption::parse) + .filter(Objects::nonNull) + .collect(Collectors.toMap(JVMOption::getJvmOption, jvmOption -> jvmOption)); } - } diff --git a/priam/src/main/java/com/netflix/priam/tuner/PropertiesFileTuner.java b/priam/src/main/java/com/netflix/priam/tuner/PropertiesFileTuner.java new file mode 100644 index 000000000..818816707 --- /dev/null +++ b/priam/src/main/java/com/netflix/priam/tuner/PropertiesFileTuner.java @@ -0,0 +1,56 @@ +package com.netflix.priam.tuner; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Splitter; +import com.google.inject.Inject; +import com.netflix.priam.config.IConfiguration; +import java.io.FileReader; +import java.io.FileWriter; +import java.io.IOException; +import java.util.Map; +import org.apache.commons.configuration2.PropertiesConfiguration; +import org.apache.commons.configuration2.ex.ConfigurationException; +import org.apache.commons.io.FilenameUtils; +import org.apache.commons.text.StringSubstitutor; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Support tuning standard .properties files + * + *

+ */ +public class PropertiesFileTuner { + private static final Logger logger = LoggerFactory.getLogger(PropertiesFileTuner.class); + protected final IConfiguration config; + + @Inject + public PropertiesFileTuner(IConfiguration config) { + this.config = config; + } + + @SuppressWarnings("unchecked") + public void updateAndSaveProperties(String propertyFile) + throws IOException, ConfigurationException { + try { + PropertiesConfiguration properties = new PropertiesConfiguration(); + properties.getLayout().load(properties, new FileReader(propertyFile)); + String overrides = + config.getProperty( + "propertyOverrides." + FilenameUtils.getBaseName(propertyFile), null); + if (overrides != null) { + // Allow use of the IConfiguration object as template strings + Map map = new ObjectMapper().convertValue(config, Map.class); + String resolvedOverrides = new StringSubstitutor(map).replace(overrides); + Splitter.on(",") + .withKeyValueSeparator("=") + .split(resolvedOverrides) + .forEach(properties::setProperty); + } + properties.getLayout().save(properties, new FileWriter(propertyFile)); + } catch (IOException | ConfigurationException e) { + logger.error("Could not tune " + propertyFile + ". Does it exist? Is it writable?", e); + throw e; + } + } +} diff --git a/priam/src/main/java/com/netflix/priam/tuner/StandardTuner.java b/priam/src/main/java/com/netflix/priam/tuner/StandardTuner.java index c7636afa9..41482d509 100644 --- a/priam/src/main/java/com/netflix/priam/tuner/StandardTuner.java +++ b/priam/src/main/java/com/netflix/priam/tuner/StandardTuner.java @@ -1,66 +1,73 @@ /** * Copyright 2017 Netflix, Inc. - *
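To make the override format of the new PropertiesFileTuner concrete: the override string is first template-expanded against the Jackson-serialized IConfiguration, then split into key=value pairs. A self-contained sketch with invented names, using the same StringSubstitutor and Splitter calls as the class above:

```java
import com.google.common.base.Splitter;
import java.util.HashMap;
import java.util.Map;
import org.apache.commons.text.StringSubstitutor;

public class OverrideFormatDemo {
    public static void main(String[] args) {
        // Pretend this map came from serializing IConfiguration with Jackson.
        Map<String, String> configAsMap = new HashMap<>();
        configAsMap.put("rac", "us-east-1a");

        // Hypothetical override value for a file named rackdc.properties.
        String overrides = "dc=us-east-1,rack=${rac}";

        // 1. Resolve ${...} templates against the configuration map.
        String resolved = new StringSubstitutor(configAsMap).replace(overrides);

        // 2. Split into properties: {dc=us-east-1, rack=us-east-1a}
        Map<String, String> props =
                Splitter.on(",").withKeyValueSeparator("=").split(resolved);
        System.out.println(props);
    }
}
```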

- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
+ *
+ * <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * <p>http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * <p>
Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.priam.tuner; import com.google.common.collect.Lists; import com.google.inject.Inject; +import com.netflix.priam.backup.IncrementalBackup; +import com.netflix.priam.config.IBackupRestoreConfig; import com.netflix.priam.config.IConfiguration; -import com.netflix.priam.backup.SnapshotBackup; +import com.netflix.priam.identity.config.InstanceInfo; import com.netflix.priam.restore.Restore; -import org.apache.commons.collections4.CollectionUtils; -import org.apache.commons.io.IOUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.yaml.snakeyaml.DumperOptions; -import org.yaml.snakeyaml.Yaml; - import java.io.*; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Properties; +import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.yaml.snakeyaml.DumperOptions; +import org.yaml.snakeyaml.Yaml; /** - * Tune the standard cassandra parameters/configurations. eg. cassandra.yaml, jvm.options, bootstrap etc. + * Tune the standard cassandra parameters/configurations. eg. cassandra.yaml, jvm.options, bootstrap + * etc. */ public class StandardTuner implements ICassandraTuner { private static final Logger logger = LoggerFactory.getLogger(StandardTuner.class); protected final IConfiguration config; + protected final IBackupRestoreConfig backupRestoreConfig; + private final InstanceInfo instanceInfo; @Inject - public StandardTuner(IConfiguration config) { + public StandardTuner( + IConfiguration config, + IBackupRestoreConfig backupRestoreConfig, + InstanceInfo instanceInfo) { this.config = config; + this.backupRestoreConfig = backupRestoreConfig; + this.instanceInfo = instanceInfo; } - public void writeAllProperties(String yamlLocation, String hostname, String seedProvider) throws Exception { + @SuppressWarnings("unchecked") + public void writeAllProperties(String yamlLocation, String hostname, String seedProvider) + throws Exception { DumperOptions options = new DumperOptions(); options.setDefaultFlowStyle(DumperOptions.FlowStyle.BLOCK); Yaml yaml = new Yaml(options); File yamlFile = new File(yamlLocation); - Map map = (Map) yaml.load(new FileInputStream(yamlFile)); + Map map = yaml.load(new FileInputStream(yamlFile)); map.put("cluster_name", config.getAppName()); map.put("storage_port", config.getStoragePort()); map.put("ssl_storage_port", config.getSSLStoragePort()); - map.put("start_rpc", config.isThriftEnabled()); - map.put("rpc_port", config.getThriftPort()); map.put("start_native_transport", config.isNativeTransportEnabled()); map.put("native_transport_port", config.getNativeTransportPort()); map.put("listen_address", hostname); map.put("rpc_address", hostname); - //Dont bootstrap in restore mode - if (!Restore.isRestoreEnabled(config)) { + // Dont bootstrap in restore mode + if (!Restore.isRestoreEnabled(config, instanceInfo)) { map.put("auto_bootstrap", config.getAutoBoostrap()); } else { map.put("auto_bootstrap", false); @@ -71,7 +78,7 @@ public void writeAllProperties(String yamlLocation, String hostname, String seed map.put("hints_directory", config.getHintsLocation()); map.put("data_file_directories", 
Lists.newArrayList(config.getDataFileLocation())); - boolean enableIncremental = (SnapshotBackup.isBackupEnabled(config) && config.isIncrBackup()) && (CollectionUtils.isEmpty(config.getBackupRacs()) || config.getBackupRacs().contains(config.getRac())); + boolean enableIncremental = IncrementalBackup.isEnabled(config, backupRestoreConfig); map.put("incremental_backups", enableIncremental); map.put("endpoint_snitch", config.getSnitch()); @@ -79,7 +86,9 @@ public void writeAllProperties(String yamlLocation, String hostname, String seed map.remove("in_memory_compaction_limit_in_mb"); } map.put("compaction_throughput_mb_per_sec", config.getCompactionThroughput()); - map.put("partitioner", derivePartitioner(map.get("partitioner").toString(), config.getPartitioner())); + map.put( + "partitioner", + derivePartitioner(map.get("partitioner").toString(), config.getPartitioner())); if (map.containsKey("memtable_total_space_in_mb")) { map.remove("memtable_total_space_in_mb"); @@ -94,6 +103,7 @@ public void writeAllProperties(String yamlLocation, String hostname, String seed map.put("hinted_handoff_throttle_in_kb", config.getHintedHandoffThrottleKb()); map.put("authenticator", config.getAuthenticator()); map.put("authorizer", config.getAuthorizer()); + map.put("role_manager", config.getRoleManager()); map.put("internode_compression", config.getInternodeCompression()); map.put("dynamic_snitch", config.isDynamicSnitchEnabled()); @@ -101,20 +111,19 @@ public void writeAllProperties(String yamlLocation, String hostname, String seed map.put("concurrent_writes", config.getConcurrentWritesCnt()); map.put("concurrent_compactors", config.getConcurrentCompactorsCnt()); - map.put("rpc_server_type", config.getRpcServerType()); - map.put("rpc_min_threads", config.getRpcMinThreads()); - map.put("rpc_max_threads", config.getRpcMaxThreads()); - // Add private ip address as broadcast_rpc_address. This will ensure that COPY function works correctly. - map.put("broadcast_rpc_address", config.getInstanceDataRetriever().getPrivateIP()); - //map.put("index_interval", config.getIndexInterval()); - + // Add private ip address as broadcast_rpc_address. This will ensure that COPY function + // works correctly. 
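Stepping back from the individual map.put calls: writeAllProperties is one load-mutate-dump cycle over cassandra.yaml. Its skeleton, using the same SnakeYAML API that appears throughout this file (file name and values are illustrative):

```java
import java.io.FileInputStream;
import java.io.FileWriter;
import java.util.Map;
import org.yaml.snakeyaml.DumperOptions;
import org.yaml.snakeyaml.Yaml;

public class YamlRoundTripDemo {
    public static void main(String[] args) throws Exception {
        DumperOptions options = new DumperOptions();
        options.setDefaultFlowStyle(DumperOptions.FlowStyle.BLOCK);
        Yaml yaml = new Yaml(options);

        // 1. Load the existing YAML into a mutable map.
        Map<String, Object> map = yaml.load(new FileInputStream("cassandra.yaml"));

        // 2. Mutate whichever keys the tuner owns.
        map.put("cluster_name", "demo_cluster");
        map.put("num_tokens", 1);

        // 3. Dump the map back, overwriting the file in block style.
        yaml.dump(map, new FileWriter("cassandra.yaml"));
    }
}
```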
+ map.put("broadcast_rpc_address", instanceInfo.getPrivateIP()); map.put("tombstone_warn_threshold", config.getTombstoneWarnThreshold()); map.put("tombstone_failure_threshold", config.getTombstoneFailureThreshold()); - map.put("streaming_socket_timeout_in_ms", config.getStreamingSocketTimeoutInMS()); + map.put("streaming_keep_alive_period", config.getStreamingKeepAlivePeriod() + "s"); map.put("memtable_cleanup_threshold", config.getMemtableCleanupThreshold()); - map.put("compaction_large_partition_warning_threshold_mb", config.getCompactionLargePartitionWarnThresholdInMB()); + map.put( + "compaction_large_partition_warning_threshold_mb", + config.getCompactionLargePartitionWarnThresholdInMB()); + map.put("disk_access_mode", config.getDiskAccessMode()); List seedp = (List) map.get("seed_provider"); Map m = (Map) seedp.get(0); @@ -122,22 +131,40 @@ public void writeAllProperties(String yamlLocation, String hostname, String seed configfureSecurity(map); configureGlobalCaches(config, map); - //force to 1 until vnodes are properly supported + // force to 1 until vnodes are properly supported map.put("num_tokens", 1); - + // Additional C* Yaml properties, which can be set via Priam.extra.params addExtraCassParams(map); - //remove troublesome properties + // Custom specific C* yaml properties which might not be available in Apache C* OSS + addCustomCassParams(map); + + // remove troublesome properties map.remove("flush_largest_memtables_at"); map.remove("reduce_cache_capacity_to"); logger.info(yaml.dump(map)); yaml.dump(map, new FileWriter(yamlFile)); + // TODO: port commit log backups to the PropertiesFileTuner implementation configureCommitLogBackups(); + + PropertiesFileTuner propertyTuner = new PropertiesFileTuner(config); + for (String propertyFile : config.getTunablePropertyFiles()) { + propertyTuner.updateAndSaveProperties(propertyFile); + } } + /** + * This method can be overwritten in child classes for any additional tunings to C* Yaml. + * Default implementation is left empty intentionally for child classes to override. This is + * useful when custom YAML properties are supported in deployed C*. + * + * @param map + */ + protected void addCustomCassParams(Map map) {} + /** * Overridable by derived classes to inject a wrapper snitch. 
* @@ -147,66 +174,61 @@ protected String getSnitch() { return config.getSnitch(); } - /** - * Setup the cassandra 1.1 global cache values - */ + /** Setup the cassandra 1.1 global cache values */ private void configureGlobalCaches(IConfiguration config, Map yaml) { final String keyCacheSize = config.getKeyCacheSizeInMB(); - if (keyCacheSize != null) { + if (!StringUtils.isEmpty(keyCacheSize)) { yaml.put("key_cache_size_in_mb", Integer.valueOf(keyCacheSize)); final String keyCount = config.getKeyCacheKeysToSave(); - if (keyCount != null) + if (!StringUtils.isEmpty(keyCount)) yaml.put("key_cache_keys_to_save", Integer.valueOf(keyCount)); } final String rowCacheSize = config.getRowCacheSizeInMB(); - if (rowCacheSize != null) { + if (!StringUtils.isEmpty(rowCacheSize)) { yaml.put("row_cache_size_in_mb", Integer.valueOf(rowCacheSize)); final String rowCount = config.getRowCacheKeysToSave(); - if (rowCount != null) + if (!StringUtils.isEmpty(rowCount)) yaml.put("row_cache_keys_to_save", Integer.valueOf(rowCount)); } } String derivePartitioner(String fromYaml, String fromConfig) { - if (fromYaml == null || fromYaml.isEmpty()) - return fromConfig; - //this check is to prevent against overwriting an existing yaml file that has + if (fromYaml == null || fromYaml.isEmpty()) return fromConfig; + // this check is to prevent against overwriting an existing yaml file that has // a partitioner not RandomPartitioner or (as of cass 1.2) Murmur3Partitioner. - //basically we don't want to hose existing deployments by changing the partitioner unexpectedly on them + // basically we don't want to hose existing deployments by changing the partitioner + // unexpectedly on them final String lowerCase = fromYaml.toLowerCase(); - if (lowerCase.contains("randomparti") || lowerCase.contains("murmur")) - return fromConfig; + if (lowerCase.contains("randomparti") || lowerCase.contains("murmur")) return fromConfig; return fromYaml; } protected void configfureSecurity(Map map) { - //the client-side ssl settings + // the client-side ssl settings Map clientEnc = (Map) map.get("client_encryption_options"); clientEnc.put("enabled", config.isClientSslEnabled()); - //the server-side (internode) ssl settings + // the server-side (internode) ssl settings Map serverEnc = (Map) map.get("server_encryption_options"); serverEnc.put("internode_encryption", config.getInternodeEncryption()); } - protected void configureCommitLogBackups() throws IOException { - if (!config.isBackingUpCommitLogs()) - return; + protected void configureCommitLogBackups() { + if (!config.isBackingUpCommitLogs()) return; Properties props = new Properties(); props.put("archive_command", config.getCommitLogBackupArchiveCmd()); props.put("restore_command", config.getCommitLogBackupRestoreCmd()); props.put("restore_directories", config.getCommitLogBackupRestoreFromDirs()); props.put("restore_point_in_time", config.getCommitLogBackupRestorePointInTime()); - FileOutputStream fos = null; - try { - fos = new FileOutputStream(new File(config.getCommitLogBackupPropsFile())); + try (FileOutputStream fos = + new FileOutputStream(new File(config.getCommitLogBackupPropsFile()))) { props.store(fos, "cassandra commit log archive props, as written by priam"); - } finally { - IOUtils.closeQuietly(fos); + } catch (IOException e) { + logger.error("Could not store commitlog_archiving.properties", e); } } @@ -215,8 +237,8 @@ public void updateAutoBootstrap(String yamlFile, boolean autobootstrap) throws I options.setDefaultFlowStyle(DumperOptions.FlowStyle.BLOCK); Yaml yaml = new 
Yaml(options); @SuppressWarnings("rawtypes") - Map map = (Map) yaml.load(new FileInputStream(yamlFile)); - //Dont bootstrap in restore mode + Map map = yaml.load(new FileInputStream(yamlFile)); + // Dont bootstrap in restore mode map.put("auto_bootstrap", autobootstrap); if (logger.isInfoEnabled()) { logger.info("Updating yaml: " + yaml.dump(map)); @@ -227,26 +249,48 @@ public void updateAutoBootstrap(String yamlFile, boolean autobootstrap) throws I @Override public final void updateJVMOptions() throws Exception { JVMOptionsTuner tuner = new JVMOptionsTuner(config); - //Overwrite default jvm.options file. + // Overwrite default jvm.options file. tuner.updateAndSaveJVMOptions(config.getJVMOptionsFileLocation()); } public void addExtraCassParams(Map map) { String params = config.getExtraConfigParams(); - if (params == null) { + if (StringUtils.isEmpty(params)) { logger.info("Updating yaml: no extra cass params"); return; } String[] pairs = params.split(","); logger.info("Updating yaml: adding extra cass params"); - for (int i = 0; i < pairs.length; i++) { - String[] pair = pairs[i].split("="); + for (String pair1 : pairs) { + String[] pair = pair1.split("="); String priamKey = pair[0]; String cassKey = pair[1]; String cassVal = config.getCassYamlVal(priamKey); - logger.info("Updating yaml: Priamkey[{}], CassKey[{}], Val[{}]", priamKey, cassKey, cassVal); - map.put(cassKey, cassVal); + + if (!StringUtils.isBlank(cassKey) && !StringUtils.isBlank(cassVal)) { + if (!cassKey.contains(".")) { + logger.info( + "Updating yaml: PriamKey: [{}], Key: [{}], OldValue: [{}], NewValue: [{}]", + priamKey, + cassKey, + map.get(cassKey), + cassVal); + map.put(cassKey, cassVal); + } else { + // split the cassandra key. We will get the group and get the key name. + String[] cassKeySplit = cassKey.split("\\."); + Map cassKeyMap = ((Map) map.getOrDefault(cassKeySplit[0], new HashMap())); + map.putIfAbsent(cassKeySplit[0], cassKeyMap); + logger.info( + "Updating yaml: PriamKey: [{}], Key: [{}], OldValue: [{}], NewValue: [{}]", + priamKey, + cassKey, + cassKeyMap.get(cassKeySplit[1]), + cassVal); + cassKeyMap.put(cassKeySplit[1], cassVal); + } + } } } } diff --git a/priam/src/main/java/com/netflix/priam/tuner/TuneCassandra.java b/priam/src/main/java/com/netflix/priam/tuner/TuneCassandra.java index bf586cca1..eacbd6a4f 100644 --- a/priam/src/main/java/com/netflix/priam/tuner/TuneCassandra.java +++ b/priam/src/main/java/com/netflix/priam/tuner/TuneCassandra.java @@ -22,23 +22,24 @@ import com.netflix.priam.scheduler.SimpleTimer; import com.netflix.priam.scheduler.Task; import com.netflix.priam.scheduler.TaskTimer; +import java.io.IOException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; - /** - * Tune Cassandra (Open source or DSE) via updating various configuration files (dse.yaml, cassandra.yaml, jvm.options etc) + * Tune Cassandra (Open source or DSE) via updating various configuration files (dse.yaml, + * cassandra.yaml, jvm.options etc) */ @Singleton public class TuneCassandra extends Task { private static final String JOBNAME = "Tune-Cassandra"; private static final Logger LOGGER = LoggerFactory.getLogger(TuneCassandra.class); private final ICassandraTuner tuner; - private InstanceState instanceState; + private final InstanceState instanceState; @Inject - public TuneCassandra(IConfiguration config, ICassandraTuner tuner, InstanceState instanceState) { + public TuneCassandra( + IConfiguration config, ICassandraTuner tuner, InstanceState instanceState) { super(config); 
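Looking back at the dotted-key branch of addExtraCassParams in the hunk above: a key such as client_encryption_options.enabled is split on the dot and written into a nested map. A trimmed, self-contained illustration (key and value invented):

```java
import java.util.HashMap;
import java.util.Map;

public class NestedKeyDemo {
    public static void main(String[] args) {
        Map<String, Object> map = new HashMap<>(); // stands in for the parsed yaml

        String cassKey = "client_encryption_options.enabled";
        String cassVal = "true";

        String[] parts = cassKey.split("\\.");
        // Reuse the existing sub-map if the yaml already had one,
        // otherwise register the fresh one.
        Map<String, Object> subMap =
                (Map<String, Object>) map.getOrDefault(parts[0], new HashMap<>());
        map.putIfAbsent(parts[0], subMap);
        subMap.put(parts[1], cassVal);

        System.out.println(map); // {client_encryption_options={enabled=true}}
    }
}
```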
this.tuner = tuner; this.instanceState = instanceState; @@ -50,20 +51,20 @@ public void execute() throws Exception { while (!isDone) { try { - tuner.writeAllProperties(config.getYamlLocation(), null, config.getSeedProviderName()); + tuner.writeAllProperties( + config.getYamlLocation(), null, config.getSeedProviderName()); tuner.updateJVMOptions(); isDone = true; instanceState.setYmlWritten(true); } catch (IOException e) { - LOGGER.error("Fail wrting cassandra.yml file. Retry again!", e); + LOGGER.error("Fail writing cassandra.yml file. Retry again!", e); } } - } @Override public String getName() { - return "Tune-Cassandra"; + return JOBNAME; } public static TaskTimer getTimer() { diff --git a/priam/src/main/java/com/netflix/priam/tuner/dse/AuditLogTunerLog4J.java b/priam/src/main/java/com/netflix/priam/tuner/dse/AuditLogTunerLog4J.java index 66cbc9d4b..1d0e820b1 100644 --- a/priam/src/main/java/com/netflix/priam/tuner/dse/AuditLogTunerLog4J.java +++ b/priam/src/main/java/com/netflix/priam/tuner/dse/AuditLogTunerLog4J.java @@ -21,24 +21,22 @@ import com.google.common.io.Files; import com.google.inject.Inject; import com.netflix.priam.config.IConfiguration; -import org.apache.cassandra.io.util.FileUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.io.BufferedWriter; import java.io.File; import java.nio.charset.Charset; import java.util.List; +import org.apache.cassandra.io.util.FileUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * Dse tuner for audit log via log4j. - * Use this instead of AuditLogTunerYaml if you are on DSE version 3.x. - * Created by aagrawal on 8/8/17. + * Dse tuner for audit log via log4j. Use this instead of AuditLogTunerYaml if you are on DSE + * version 3.x. Created by aagrawal on 8/8/17. */ public class AuditLogTunerLog4J implements IAuditLogTuner { - private IConfiguration config; - private IDseConfiguration dseConfig; + private final IConfiguration config; + private final IDseConfiguration dseConfig; protected static final String AUDIT_LOG_ADDITIVE_ENTRY = "log4j.additivity.DataAudit"; protected static final String AUDIT_LOG_FILE = "/conf/log4j-server.properties"; protected static final String PRIMARY_AUDIT_LOG_ENTRY = "log4j.logger.DataAudit"; @@ -51,18 +49,22 @@ public AuditLogTunerLog4J(IConfiguration config, IDseConfiguration dseConfig) { } /** - * Note: supporting the direct hacking of a log4j props file is far from elegant, - * but seems less odious than other solutions I've come up with. - * Operates under the assumption that the only people mucking with the audit log - * entries in the value are DataStax themselves and this program, and that the original - * property names are somehow still preserved. Otherwise, YMMV. + * Note: supporting the direct hacking of a log4j props file is far from elegant, but seems less + * odious than other solutions I've come up with. Operates under the assumption that the only + * people mucking with the audit log entries in the value are DataStax themselves and this + * program, and that the original property names are somehow still preserved. Otherwise, YMMV. */ public void tuneAuditLog() { BufferedWriter writer = null; try { final File srcFile = new File(config.getCassHome() + AUDIT_LOG_FILE); final List lines = Files.readLines(srcFile, Charset.defaultCharset()); - final File backupFile = new File(config.getCassHome() + AUDIT_LOG_FILE + "." + System.currentTimeMillis()); + final File backupFile = + new File( + config.getCassHome() + + AUDIT_LOG_FILE + + "." 
+ + System.currentTimeMillis()); Files.move(srcFile, backupFile); writer = Files.newWriter(srcFile, Charset.defaultCharset()); @@ -70,30 +72,37 @@ public void tuneAuditLog() { try { loggerPrefix += findAuditLoggerName(lines); } catch (IllegalStateException ise) { - logger.warn("cannot locate " + PRIMARY_AUDIT_LOG_ENTRY + " property, will ignore any audit log updating"); + logger.warn( + "cannot locate " + + PRIMARY_AUDIT_LOG_ENTRY + + " property, will ignore any audit log updating"); return; } for (String line : lines) { - if (line.contains(loggerPrefix) || line.contains(PRIMARY_AUDIT_LOG_ENTRY) || line.contains(AUDIT_LOG_ADDITIVE_ENTRY)) { + if (line.contains(loggerPrefix) + || line.contains(PRIMARY_AUDIT_LOG_ENTRY) + || line.contains(AUDIT_LOG_ADDITIVE_ENTRY)) { if (dseConfig.isAuditLogEnabled()) { - //first, check to see if we need to uncomment the line + // first, check to see if we need to uncomment the line while (line.startsWith("#")) { line = line.substring(1); } - //next, check if we need to change the prop's value + // next, check if we need to change the prop's value if (line.contains("ActiveCategories")) { - final String cats = Joiner.on(",").join(dseConfig.getAuditLogCategories()); + final String cats = + Joiner.on(",").join(dseConfig.getAuditLogCategories()); line = line.substring(0, line.indexOf("=") + 1).concat(cats); } else if (line.contains("ExemptKeyspaces")) { - line = line.substring(0, line.indexOf("=") + 1).concat(dseConfig.getAuditLogExemptKeyspaces()); + line = + line.substring(0, line.indexOf("=") + 1) + .concat(dseConfig.getAuditLogExemptKeyspaces()); } } else { if (line.startsWith("#")) { - //make sure there's only one # at the beginning of the line - while (line.charAt(1) == '#') - line = line.substring(1); + // make sure there's only one # at the beginning of the line + while (line.charAt(1) == '#') line = line.substring(1); } else { line = "#" + line; } @@ -111,8 +120,7 @@ public void tuneAuditLog() { } } - - private final String findAuditLoggerName(List lines) throws IllegalStateException { + private String findAuditLoggerName(List lines) throws IllegalStateException { for (final String l : lines) { if (l.contains(PRIMARY_AUDIT_LOG_ENTRY)) { final String[] valTokens = l.split(","); diff --git a/priam/src/main/java/com/netflix/priam/tuner/dse/AuditLogTunerYaml.java b/priam/src/main/java/com/netflix/priam/tuner/dse/AuditLogTunerYaml.java index 45fccea95..efb2b1b71 100644 --- a/priam/src/main/java/com/netflix/priam/tuner/dse/AuditLogTunerYaml.java +++ b/priam/src/main/java/com/netflix/priam/tuner/dse/AuditLogTunerYaml.java @@ -18,24 +18,20 @@ package com.netflix.priam.tuner.dse; import com.google.inject.Inject; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.yaml.snakeyaml.DumperOptions; -import org.yaml.snakeyaml.Yaml; - import java.io.FileInputStream; import java.io.FileNotFoundException; import java.io.FileWriter; import java.io.IOException; import java.util.Map; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.yaml.snakeyaml.DumperOptions; +import org.yaml.snakeyaml.Yaml; -/** - * Dse tuner for audit log via YAML. Use this for DSE version 4.x - * Created by aagrawal on 8/8/17. - */ +/** Dse tuner for audit log via YAML. Use this for DSE version 4.x Created by aagrawal on 8/8/17. 
*/ public class AuditLogTunerYaml implements IAuditLogTuner { - private IDseConfiguration dseConfig; + private final IDseConfiguration dseConfig; private static final String AUDIT_LOG_DSE_ENTRY = "audit_logging_options"; private static final Logger logger = LoggerFactory.getLogger(AuditLogTunerYaml.class); @@ -50,12 +46,15 @@ public void tuneAuditLog() { Yaml yaml = new Yaml(options); String dseYaml = dseConfig.getDseYamlLocation(); try { - Map map = (Map) yaml.load(new FileInputStream(dseYaml)); + Map map = yaml.load(new FileInputStream(dseYaml)); if (map.containsKey(AUDIT_LOG_DSE_ENTRY)) { - Boolean isEnabled = (Boolean) ((Map) map.get(AUDIT_LOG_DSE_ENTRY)).get("enabled"); + Boolean isEnabled = + (Boolean) + ((Map) map.get(AUDIT_LOG_DSE_ENTRY)).get("enabled"); - // Enable/disable audit logging (need this in addition to log4j-server.properties settings) + // Enable/disable audit logging (need this in addition to log4j-server.properties + // settings) if (dseConfig.isAuditLogEnabled()) { if (!isEnabled) { ((Map) map.get(AUDIT_LOG_DSE_ENTRY)).put("enabled", true); @@ -70,9 +69,12 @@ public void tuneAuditLog() { } yaml.dump(map, new FileWriter(dseYaml)); } catch (FileNotFoundException fileNotFound) { - logger.error("FileNotFound while trying to read yaml audit log for tuning: {}", dseYaml); + logger.error( + "FileNotFound while trying to read yaml audit log for tuning: {}", dseYaml); } catch (IOException e) { - logger.error("IOException while trying to write yaml file for audit log tuning: {}", dseYaml); + logger.error( + "IOException while trying to write yaml file for audit log tuning: {}", + dseYaml); } } } diff --git a/priam/src/main/java/com/netflix/priam/tuner/dse/DseProcessManager.java b/priam/src/main/java/com/netflix/priam/tuner/dse/DseProcessManager.java index 2ee07d7b9..9560a3925 100644 --- a/priam/src/main/java/com/netflix/priam/tuner/dse/DseProcessManager.java +++ b/priam/src/main/java/com/netflix/priam/tuner/dse/DseProcessManager.java @@ -1,16 +1,14 @@ /** * Copyright 2017 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
+ *
+ * <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * <p>http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * <p>
Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.priam.tuner.dse; @@ -21,14 +19,17 @@ import com.netflix.priam.health.InstanceState; import com.netflix.priam.merics.CassMonitorMetrics; import com.netflix.priam.tuner.dse.IDseConfiguration.NodeType; - import java.util.Map; public class DseProcessManager extends CassandraProcessManager { private final IDseConfiguration dseConfig; @Inject - public DseProcessManager(IConfiguration config, IDseConfiguration dseConfig, InstanceState instanceState, CassMonitorMetrics cassMonitorMetrics) { + public DseProcessManager( + IConfiguration config, + IDseConfiguration dseConfig, + InstanceState instanceState, + CassMonitorMetrics cassMonitorMetrics) { super(config, instanceState, cassMonitorMetrics); this.dseConfig = dseConfig; } @@ -37,14 +38,9 @@ protected void setEnv(Map env) { super.setEnv(env); NodeType nodeType = dseConfig.getNodeType(); - if (nodeType == NodeType.ANALYTIC_HADOOP) - env.put("CLUSTER_TYPE", "-t"); - else if (nodeType == NodeType.ANALYTIC_SPARK) - env.put("CLUSTER_TYPE", "-k"); - else if (nodeType == NodeType.ANALYTIC_HADOOP_SPARK) - env.put("CLUSTER_TYPE", "-k -t"); - else if (nodeType == NodeType.SEARCH) - env.put("CLUSTER_TYPE", "-s"); + if (nodeType == NodeType.ANALYTIC_HADOOP) env.put("CLUSTER_TYPE", "-t"); + else if (nodeType == NodeType.ANALYTIC_SPARK) env.put("CLUSTER_TYPE", "-k"); + else if (nodeType == NodeType.ANALYTIC_HADOOP_SPARK) env.put("CLUSTER_TYPE", "-k -t"); + else if (nodeType == NodeType.SEARCH) env.put("CLUSTER_TYPE", "-s"); } - } diff --git a/priam/src/main/java/com/netflix/priam/tuner/dse/DseTuner.java b/priam/src/main/java/com/netflix/priam/tuner/dse/DseTuner.java index df07c7c22..047d06052 100644 --- a/priam/src/main/java/com/netflix/priam/tuner/dse/DseTuner.java +++ b/priam/src/main/java/com/netflix/priam/tuner/dse/DseTuner.java @@ -1,34 +1,33 @@ /** * Copyright 2017 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
+ *
+ * <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * <p>http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * <p>
Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.priam.tuner.dse; +import static com.netflix.priam.tuner.dse.IDseConfiguration.NodeType; +import static org.apache.cassandra.locator.SnitchProperties.RACKDC_PROPERTY_FILENAME; + import com.google.inject.Inject; +import com.netflix.priam.config.IBackupRestoreConfig; import com.netflix.priam.config.IConfiguration; +import com.netflix.priam.identity.config.InstanceInfo; import com.netflix.priam.tuner.StandardTuner; -import org.apache.cassandra.io.util.FileUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.io.FileReader; import java.io.FileWriter; import java.io.Reader; import java.util.Properties; - -import static com.netflix.priam.tuner.dse.IDseConfiguration.NodeType; -import static org.apache.cassandra.locator.SnitchProperties.RACKDC_PROPERTY_FILENAME; +import org.apache.cassandra.io.util.FileUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Makes Datastax Enterprise-specific changes to the c* yaml and dse-yaml. @@ -42,13 +41,19 @@ public class DseTuner extends StandardTuner { private final IAuditLogTuner auditLogTuner; @Inject - public DseTuner(IConfiguration config, IDseConfiguration dseConfig, IAuditLogTuner auditLogTuner) { - super(config); + public DseTuner( + IConfiguration config, + IBackupRestoreConfig backupRestoreConfig, + IDseConfiguration dseConfig, + IAuditLogTuner auditLogTuner, + InstanceInfo instanceInfo) { + super(config, backupRestoreConfig, instanceInfo); this.dseConfig = dseConfig; this.auditLogTuner = auditLogTuner; } - public void writeAllProperties(String yamlLocation, String hostname, String seedProvider) throws Exception { + public void writeAllProperties(String yamlLocation, String hostname, String seedProvider) + throws Exception { super.writeAllProperties(yamlLocation, hostname, seedProvider); writeCassandraSnitchProperties(); auditLogTuner.tuneAuditLog(); @@ -56,8 +61,7 @@ public void writeAllProperties(String yamlLocation, String hostname, String seed private void writeCassandraSnitchProperties() { final NodeType nodeType = dseConfig.getNodeType(); - if (nodeType == NodeType.REAL_TIME_QUERY) - return; + if (nodeType == NodeType.REAL_TIME_QUERY) return; Reader reader = null; try { @@ -66,14 +70,10 @@ private void writeCassandraSnitchProperties() { Properties properties = new Properties(); properties.load(reader); String suffix = ""; - if (nodeType == NodeType.SEARCH) - suffix = "_solr"; - if (nodeType == NodeType.ANALYTIC_HADOOP) - suffix = "_hadoop"; - if (nodeType == NodeType.ANALYTIC_HADOOP_SPARK) - suffix = "_hadoop_spark"; - if (nodeType == NodeType.ANALYTIC_SPARK) - suffix = "_spark"; + if (nodeType == NodeType.SEARCH) suffix = "_solr"; + if (nodeType == NodeType.ANALYTIC_HADOOP) suffix = "_hadoop"; + if (nodeType == NodeType.ANALYTIC_HADOOP_SPARK) suffix = "_hadoop_spark"; + if (nodeType == NodeType.ANALYTIC_SPARK) suffix = "_spark"; properties.put("dc_suffix", suffix); properties.store(new FileWriter(filePath), ""); diff --git a/priam/src/main/java/com/netflix/priam/tuner/dse/IAuditLogTuner.java b/priam/src/main/java/com/netflix/priam/tuner/dse/IAuditLogTuner.java index f8d959b6e..909238273 100644 --- a/priam/src/main/java/com/netflix/priam/tuner/dse/IAuditLogTuner.java 
+++ b/priam/src/main/java/com/netflix/priam/tuner/dse/IAuditLogTuner.java @@ -20,9 +20,8 @@ import com.google.inject.ImplementedBy; /** - * This is intended for tuning audit log settings. - * Audit log settings file change between cassandra version from log4j to yaml. - * Created by aagrawal on 8/8/17. + * This is intended for tuning audit log settings. Audit log settings file change between cassandra + * version from log4j to yaml. Created by aagrawal on 8/8/17. */ @ImplementedBy(AuditLogTunerYaml.class) interface IAuditLogTuner { diff --git a/priam/src/main/java/com/netflix/priam/tuner/dse/IDseConfiguration.java b/priam/src/main/java/com/netflix/priam/tuner/dse/IDseConfiguration.java index 471fcb058..3c6de287f 100644 --- a/priam/src/main/java/com/netflix/priam/tuner/dse/IDseConfiguration.java +++ b/priam/src/main/java/com/netflix/priam/tuner/dse/IDseConfiguration.java @@ -1,16 +1,14 @@ /** * Copyright 2017 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
+ *
+ * <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * <p>http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * <p>
Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.priam.tuner.dse; @@ -23,9 +21,7 @@ * @author jason brown */ public interface IDseConfiguration { - /** - * Using Datastax's terms here for the different types of nodes. - */ + /** Using Datastax's terms here for the different types of nodes. */ enum NodeType { /** vanilla Cassandra node */ REAL_TIME_QUERY("cassandra"), @@ -50,8 +46,7 @@ enum NodeType { public static NodeType getByAltName(String altName) { for (NodeType nt : NodeType.values()) { - if (nt.altName.toLowerCase().equals(altName)) - return nt; + if (nt.altName.toLowerCase().equals(altName)) return nt; } throw new IllegalArgumentException("Unknown node type: " + altName); } @@ -67,7 +62,7 @@ public static NodeType getByAltName(String altName) { boolean isAuditLogEnabled(); - /** @return comma-delimited list of keyspace names */ + /** @return comma-delimited list of keyspace names */ String getAuditLogExemptKeyspaces(); /** @@ -75,7 +70,13 @@ public static NodeType getByAltName(String altName) { * http://www.datastax.com/docs/datastax_enterprise3.1/security/data_auditing#data-auditing */ enum AuditLogCategory { - ADMIN, ALL, AUTH, DML, DDL, DCL, QUERY + ADMIN, + ALL, + AUTH, + DML, + DDL, + DCL, + QUERY } Set getAuditLogCategories(); diff --git a/priam/src/main/java/com/netflix/priam/utils/BoundedExponentialRetryCallable.java b/priam/src/main/java/com/netflix/priam/utils/BoundedExponentialRetryCallable.java index b5cde9163..413ee01e5 100644 --- a/priam/src/main/java/com/netflix/priam/utils/BoundedExponentialRetryCallable.java +++ b/priam/src/main/java/com/netflix/priam/utils/BoundedExponentialRetryCallable.java @@ -16,21 +16,21 @@ */ package com.netflix.priam.utils; +import java.util.concurrent.CancellationException; import org.apache.commons.lang3.exception.ExceptionUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.concurrent.CancellationException; - public abstract class BoundedExponentialRetryCallable extends RetryableCallable { - private final static long MAX_SLEEP = 10000; - private final static long MIN_SLEEP = 1000; - private final static int MAX_RETRIES = 10; + protected static final long MAX_SLEEP = 10000; + protected static final long MIN_SLEEP = 1000; + protected static final int MAX_RETRIES = 10; - private static final Logger logger = LoggerFactory.getLogger(BoundedExponentialRetryCallable.class); - private long max; - private long min; - private int maxRetries; + private static final Logger logger = + LoggerFactory.getLogger(BoundedExponentialRetryCallable.class); + private final long max; + private final long min; + private final int maxRetries; private final ThreadSleeper sleeper = new ThreadSleeper(); public BoundedExponentialRetryCallable() { @@ -46,7 +46,7 @@ public BoundedExponentialRetryCallable(long minSleep, long maxSleep, int maxNumR } public T call() throws Exception { - long delay = min;// ms + long delay = min; // ms int retry = 0; int logCounter = 0; while (true) { @@ -65,7 +65,10 @@ public T call() throws Exception { sleeper.sleep(delay); } else if (delay >= max && retry <= maxRetries) { if (logger.isErrorEnabled()) { - logger.error(String.format("Retry #%d for: %s", retry, ExceptionUtils.getStackTrace(e))); + logger.error( + 
String.format( + "Retry #%d for: %s", + retry, ExceptionUtils.getStackTrace(e))); } sleeper.sleep(max); } else { @@ -76,5 +79,4 @@ public T call() throws Exception { } } } - } diff --git a/priam/src/main/java/com/netflix/priam/utils/DateUtil.java b/priam/src/main/java/com/netflix/priam/utils/DateUtil.java index 353947bda..ff28cf96f 100644 --- a/priam/src/main/java/com/netflix/priam/utils/DateUtil.java +++ b/priam/src/main/java/com/netflix/priam/utils/DateUtil.java @@ -17,31 +17,27 @@ package com.netflix.priam.utils; - -import org.apache.commons.lang3.StringUtils; -import org.apache.http.client.utils.DateUtils; - -import javax.inject.Singleton; import java.time.Instant; +import java.time.LocalDate; import java.time.LocalDateTime; import java.time.ZoneId; import java.time.format.DateTimeFormatter; import java.time.format.DateTimeParseException; +import java.time.temporal.ChronoUnit; import java.util.Date; +import javax.inject.Singleton; +import org.apache.commons.lang3.StringUtils; +import org.apache.http.client.utils.DateUtils; -/** - * Utility functions for date. - * Created by aagrawal on 7/10/17. - */ +/** Utility functions for date. Created by aagrawal on 7/10/17. */ @Singleton public class DateUtil { - public final static String yyyyMMdd = "yyyyMMdd"; - public final static String yyyyMMddHHmm = "yyyyMMddHHmm"; - public final static String ddMMyyyyHHmm = "ddMMyyyyHHmm"; - private final static String[] patterns = {yyyyMMddHHmm, yyyyMMdd, ddMMyyyyHHmm}; - private final static ZoneId defaultZoneId = ZoneId.systemDefault(); - private final static ZoneId utcZoneId = ZoneId.of("UTC"); + public static final String yyyyMMdd = "yyyyMMdd"; + public static final String yyyyMMddHHmm = "yyyyMMddHHmm"; + private static final String[] patterns = {yyyyMMddHHmm, yyyyMMdd}; + private static final ZoneId defaultZoneId = ZoneId.systemDefault(); + private static final ZoneId utcZoneId = ZoneId.of("UTC"); /** * Format the given date in format yyyyMMdd @@ -50,8 +46,7 @@ public class DateUtil { * @return date formatted in yyyyMMdd */ public static String formatyyyyMMdd(Date date) { - if (date == null) - return null; + if (date == null) return null; return DateUtils.formatDate(date, yyyyMMdd); } @@ -62,15 +57,14 @@ public static String formatyyyyMMdd(Date date) { * @return date formatted in yyyyMMddHHmm */ public static String formatyyyyMMddHHmm(Date date) { - if (date == null) - return null; + if (date == null) return null; return DateUtils.formatDate(date, yyyyMMddHHmm); } /** * Format the given date in given format * - * @param date to format + * @param date to format * @param pattern e.g. yyyyMMddHHmm * @return formatted date */ @@ -85,17 +79,17 @@ public static String formatDate(Date date, String pattern) { * @return the parsed date or null if input could not be parsed */ public static Date getDate(String date) { - if (StringUtils.isEmpty(date)) - return null; + if (StringUtils.isEmpty(date)) return null; return DateUtils.parseDate(date, patterns); } /** * Convert date to LocalDateTime using system default zone. 
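For orientation, a usage sketch of the retry helper above (editorial, not applied by this patch; the URL is a placeholder). The delay starts at minSleep and grows exponentially until it is capped at maxSleep, after which the call gives up once maxRetries is exhausted — the same anonymous-subclass pattern the codebase itself uses:

    BoundedExponentialRetryCallable<String> fetch =
            new BoundedExponentialRetryCallable<String>(1000, 10000, 5) {
                @Override
                public String retriableCall() throws Exception {
                    // any exception except CancellationException triggers a backoff and retry
                    return SystemUtils.getDataFromUrl("http://example.com/health");
                }
            };
    String body = fetch.call(); // sleeps grow from 1s toward the 10s cap, then the last error propagates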
+ * * @param date Date to be transformed * @return converted date to LocalDateTime */ - public static LocalDateTime convert(Date date){ + public static LocalDateTime convert(Date date) { if (date == null) return null; return date.toInstant().atZone(defaultZoneId).toLocalDateTime(); } @@ -106,7 +100,7 @@ public static LocalDateTime convert(Date date){ * @param date to format * @return date formatted in yyyyMMdd */ - public static String formatyyyyMMdd(LocalDateTime date){ + public static String formatyyyyMMdd(LocalDateTime date) { if (date == null) return null; return date.format(DateTimeFormatter.ofPattern(yyyyMMdd)); } @@ -128,35 +122,45 @@ public static String formatyyyyMMddHHmm(LocalDateTime date) { * @param date to parse. Accepted formats are yyyyMMddHHmm and yyyyMMdd * @return the parsed LocalDateTime or null if input could not be parsed */ - public static LocalDateTime getLocalDateTime(String date){ - if (StringUtils.isEmpty(date)) - return null; - - for (String pattern : patterns){ - LocalDateTime localDateTime = LocalDateTime.parse(date, DateTimeFormatter.ofPattern(pattern)); - if (localDateTime != null) - return localDateTime; + public static LocalDateTime getLocalDateTime(String date) { + if (StringUtils.isEmpty(date)) return null; + + try { + LocalDateTime localDateTime = + LocalDateTime.parse(date, DateTimeFormatter.ofPattern(yyyyMMddHHmm)); + if (localDateTime != null) return localDateTime; + } catch (DateTimeParseException e) { + // Try the date only. + try { + LocalDate localDate = LocalDate.parse(date, DateTimeFormatter.ofPattern(yyyyMMdd)); + return localDate.atTime(0, 0); + } catch (DateTimeParseException ex) { + return null; + } } + return null; } /** * Return the current instant + * * @return the instant */ - public static Instant getInstant(){ + public static Instant getInstant() { return Instant.now(); } /** - * Format the instant based on the pattern passed. If instant or pattern is null, null is returned. + * Format the instant based on the pattern passed. If instant or pattern is null, null is + * returned. + * * @param pattern Pattern that should * @param instant Instant in time * @return The formatted instant based on the pattern. Null, if pattern or instant is null. */ - public static String formatInstant(String pattern, Instant instant){ - if (instant == null || StringUtils.isEmpty(pattern)) - return null; + public static String formatInstant(String pattern, Instant instant) { + if (instant == null || StringUtils.isEmpty(pattern)) return null; DateTimeFormatter formatter = DateTimeFormatter.ofPattern(pattern).withZone(utcZoneId); return formatter.format(instant); @@ -164,24 +168,62 @@ public static String formatInstant(String pattern, Instant instant){ /** * Parse the dateTime string to Instant based on the predefined set of patterns. + * * @param dateTime DateTime string that needs to be parsed. * @return Instant object depicting the date/time. */ - public static final Instant parseInstant(String dateTime){ - if (StringUtils.isEmpty(dateTime)) - return null; + public static final Instant parseInstant(String dateTime) { + LocalDateTime localDateTime = getLocalDateTime(dateTime); + if (localDateTime == null) return null; + return localDateTime.atZone(utcZoneId).toInstant(); + } - for (String pattern : patterns){ - try { - Instant instant = DateTimeFormatter.ofPattern(pattern).withZone(utcZoneId).parse(dateTime, Instant::from); - if (instant != null) - return instant; - }catch (DateTimeParseException e) - { - //Do nothing. 
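The parsing rework above is worth spelling out (editorial sketch; values are illustrative): the minute-resolution pattern is tried first, a date-only string falls back to midnight, anything else yields null, and parseInstant() now reuses this path with a UTC interpretation:

    DateUtil.getLocalDateTime("201801021530"); // 2018-01-02T15:30
    DateUtil.getLocalDateTime("20180102");     // date-only fallback -> 2018-01-02T00:00
    DateUtil.getLocalDateTime("02-01-2018");   // matches neither pattern -> null
    DateUtil.parseInstant("20180102");         // 2018-01-02T00:00:00Z (interpreted in UTC)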
+ public static class DateRange { + Instant startTime; + Instant endTime; + + public DateRange(Instant startTime, Instant endTime) { + this.startTime = startTime; + this.endTime = endTime; + } + + public DateRange(String daterange) { + if (StringUtils.isBlank(daterange) || daterange.equalsIgnoreCase("default")) { + endTime = getInstant(); + startTime = endTime.minus(1, ChronoUnit.DAYS); + } else { + String[] dates = daterange.split(","); + startTime = parseInstant(dates[0]); + endTime = parseInstant(dates[1]); } } - return null; - } + public String match() { + if (startTime == null || endTime == null) return StringUtils.EMPTY; + String sString = startTime.toEpochMilli() + ""; + String eString = endTime.toEpochMilli() + ""; + int diff = StringUtils.indexOfDifference(sString, eString); + if (diff < 0) return sString; + return sString.substring(0, diff); + } + + public Instant getStartTime() { + return startTime; + } + + public Instant getEndTime() { + return endTime; + } + + public String toString() { + return GsonJsonSerializer.getGson().toJson(this); + } + + @Override + public boolean equals(Object obj) { + return obj.getClass().equals(this.getClass()) + && (startTime.toEpochMilli() == ((DateRange) obj).startTime.toEpochMilli()) + && (endTime.toEpochMilli() == ((DateRange) obj).endTime.toEpochMilli()); + } + } } diff --git a/priam/src/main/java/com/netflix/priam/utils/ExponentialRetryCallable.java b/priam/src/main/java/com/netflix/priam/utils/ExponentialRetryCallable.java index a551781d4..e3b11498b 100644 --- a/priam/src/main/java/com/netflix/priam/utils/ExponentialRetryCallable.java +++ b/priam/src/main/java/com/netflix/priam/utils/ExponentialRetryCallable.java @@ -16,18 +16,17 @@ */ package com.netflix.priam.utils; +import java.util.concurrent.CancellationException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.concurrent.CancellationException; - public abstract class ExponentialRetryCallable extends RetryableCallable { - public final static long MAX_SLEEP = 240000; - public final static long MIN_SLEEP = 200; + public static final long MAX_SLEEP = 240000; + public static final long MIN_SLEEP = 200; private static final Logger logger = LoggerFactory.getLogger(ExponentialRetryCallable.class); - private long max; - private long min; + private final long max; + private final long min; public ExponentialRetryCallable() { this.max = MAX_SLEEP; @@ -40,7 +39,7 @@ public ExponentialRetryCallable(long minSleep, long maxSleep) { } public T call() throws Exception { - long delay = min;// ms + long delay = min; // ms while (true) { try { return retriableCall(); @@ -58,5 +57,4 @@ public T call() throws Exception { } } } - } diff --git a/priam/src/main/java/com/netflix/priam/utils/FifoQueue.java b/priam/src/main/java/com/netflix/priam/utils/FifoQueue.java index 5e21a1515..4cb9e60b7 100644 --- a/priam/src/main/java/com/netflix/priam/utils/FifoQueue.java +++ b/priam/src/main/java/com/netflix/priam/utils/FifoQueue.java @@ -21,15 +21,10 @@ public class FifoQueue> extends TreeSet { private static final long serialVersionUID = -7388604551920505669L; - private int capacity; + private final int capacity; public FifoQueue(int capacity) { - super(new Comparator() { - @Override - public int compare(E o1, E o2) { - return o1.compareTo(o2); - } - }); + super(Comparator.naturalOrder()); this.capacity = capacity; } @@ -40,7 +35,6 @@ public FifoQueue(int capacity, Comparator comparator) { public synchronized void adjustAndAdd(E e) { add(e); - if (capacity < size()) - pollFirst(); + if 
(capacity < size()) pollFirst(); } } diff --git a/priam/src/main/java/com/netflix/priam/utils/GsonJsonSerializer.java b/priam/src/main/java/com/netflix/priam/utils/GsonJsonSerializer.java index 08a37e866..b4775b329 100644 --- a/priam/src/main/java/com/netflix/priam/utils/GsonJsonSerializer.java +++ b/priam/src/main/java/com/netflix/priam/utils/GsonJsonSerializer.java @@ -20,7 +20,6 @@ import com.google.gson.stream.JsonReader; import com.google.gson.stream.JsonToken; import com.google.gson.stream.JsonWriter; - import java.io.IOException; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; @@ -30,23 +29,22 @@ import java.time.LocalDateTime; import java.util.Date; -/** - * Created by aagrawal on 10/12/17. - */ +/** Created by aagrawal on 10/12/17. */ public class GsonJsonSerializer { - private static final Gson gson = new GsonBuilder() - //.serializeNulls() - .serializeSpecialFloatingPointValues() - .setPrettyPrinting() - .disableHtmlEscaping() - .registerTypeAdapter(Date.class, new DateTypeAdapter()) - .registerTypeAdapter(LocalDateTime.class, new LocalDateTimeTypeAdapter()) - .registerTypeAdapter(Instant.class, new InstantTypeAdapter()) - .registerTypeAdapter(Path.class, new PathTypeAdapter()) - .setExclusionStrategies(new PriamAnnotationExclusionStrategy()) - .create(); - - public static Gson getGson(){ + private static final Gson gson = + new GsonBuilder() + // .serializeNulls() + .serializeSpecialFloatingPointValues() + .setPrettyPrinting() + .disableHtmlEscaping() + .registerTypeAdapter(Date.class, new DateTypeAdapter()) + .registerTypeAdapter(LocalDateTime.class, new LocalDateTimeTypeAdapter()) + .registerTypeAdapter(Instant.class, new InstantTypeAdapter()) + .registerTypeAdapter(Path.class, new PathTypeAdapter()) + .setExclusionStrategies(new PriamAnnotationExclusionStrategy()) + .create(); + + public static Gson getGson() { return gson; } @@ -63,7 +61,7 @@ public boolean shouldSkipField(FieldAttributes f) { public static class PriamAnnotation { @Retention(RetentionPolicy.RUNTIME) -// @Target({ElementType.FIELD,ElementType.METHOD}) + // @Target({ElementType.FIELD,ElementType.METHOD}) public @interface GsonIgnore { // Field tag only annotation } @@ -71,8 +69,7 @@ public static class PriamAnnotation { static class DateTypeAdapter extends TypeAdapter { @Override - public void write(JsonWriter out, Date value) - throws IOException { + public void write(JsonWriter out, Date value) throws IOException { out.value(DateUtil.formatyyyyMMddHHmm(value)); } @@ -87,14 +84,12 @@ public Date read(JsonReader in) throws IOException { return null; } return DateUtil.getDate(result); - } } static class LocalDateTimeTypeAdapter extends TypeAdapter { @Override - public void write(JsonWriter out, LocalDateTime value) - throws IOException { + public void write(JsonWriter out, LocalDateTime value) throws IOException { out.value(DateUtil.formatyyyyMMddHHmm(value)); } @@ -109,19 +104,17 @@ public LocalDateTime read(JsonReader in) throws IOException { return null; } return DateUtil.getLocalDateTime(result); - } } static class InstantTypeAdapter extends TypeAdapter { @Override - public void write(JsonWriter out, Instant value) - throws IOException { - out.value(getEpoch(value)); + public void write(JsonWriter out, Instant value) throws IOException { + out.value(getEpoch(value)); } - private long getEpoch(Instant value){ - return (value == null)? null: value.toEpochMilli(); + private long getEpoch(Instant value) { + return (value == null) ? 
null : value.toEpochMilli(); } @Override @@ -135,15 +128,13 @@ public Instant read(JsonReader in) throws IOException { return null; } return Instant.ofEpochMilli(Long.parseLong(result)); - } } static class PathTypeAdapter extends TypeAdapter { @Override - public void write(JsonWriter out, Path value) - throws IOException { - String fileName = (value != null)? value.toFile().getName():null; + public void write(JsonWriter out, Path value) throws IOException { + String fileName = (value != null) ? value.toFile().getName() : null; out.value(fileName); } @@ -158,7 +149,6 @@ public Path read(JsonReader in) throws IOException { return null; } return Paths.get(result); - } } } diff --git a/priam/src/main/java/com/netflix/priam/utils/ITokenManager.java b/priam/src/main/java/com/netflix/priam/utils/ITokenManager.java index 506ec2043..b3a669587 100644 --- a/priam/src/main/java/com/netflix/priam/utils/ITokenManager.java +++ b/priam/src/main/java/com/netflix/priam/utils/ITokenManager.java @@ -17,14 +17,11 @@ package com.netflix.priam.utils; import com.google.inject.ImplementedBy; - import java.math.BigInteger; import java.util.List; @ImplementedBy(TokenManager.class) public interface ITokenManager { - String createToken(int mySlot, int racCount, int racSize, String region); - String createToken(int mySlot, int totalCount, String region); BigInteger findClosestToken(BigInteger tokenToSearch, List tokenList); diff --git a/priam/src/main/java/com/netflix/priam/utils/JMXConnectorMgr.java b/priam/src/main/java/com/netflix/priam/utils/JMXConnectorMgr.java deleted file mode 100644 index 5cea1d2ae..000000000 --- a/priam/src/main/java/com/netflix/priam/utils/JMXConnectorMgr.java +++ /dev/null @@ -1,50 +0,0 @@ -/** - * Copyright 2017 Netflix, Inc. - *
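Two of the additions above interlock: DateRange.toString() serializes through GsonJsonSerializer (whose Instant adapter writes epoch millis), and DateRange.match() returns the longest common prefix of the start/end epoch-milli strings, which callers can use as a key-prefix filter. A worked example (editorial; the timestamps are arbitrary):

    DateRange r = new DateRange(Instant.ofEpochMilli(1500000000000L),
                                Instant.ofEpochMilli(1500000123456L));
    r.match();                // "1500000" -- the two strings first differ at index 7
    r.toString();             // JSON via GsonJsonSerializer, e.g. {"startTime": 1500000000000, ...}
    new DateRange("default"); // convenience form: the 24 hours ending now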

- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.netflix.priam.utils; - -import com.netflix.priam.config.IConfiguration; -import org.apache.cassandra.tools.NodeProbe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; - -/** - * Represents a connection to remote JMX mbean server. This object differs from JMXNodeTool as it is meant for short - * lived connection to remote mbean server. - * - * Created by vinhn on 10/11/16. - */ -public class JMXConnectorMgr extends NodeProbe { - private static final Logger logger = LoggerFactory.getLogger(JMXConnectorMgr.class); - - /* - * create a connection to remote mbean server and get proxy to various mbeans - * @throws exception if unable to create the connection, e.g. Cassandra process not running. - */ - public JMXConnectorMgr(IConfiguration config) throws IOException, InterruptedException { - super("localhost", config.getJmxPort()); - } - - @Override - /* - close the connection to remote mbean server - */ - public void close() throws IOException { - super.close(); //close the connection to remote mbean server - } - -} diff --git a/priam/src/main/java/com/netflix/priam/utils/JMXNodeTool.java b/priam/src/main/java/com/netflix/priam/utils/JMXNodeTool.java deleted file mode 100644 index c393481fa..000000000 --- a/priam/src/main/java/com/netflix/priam/utils/JMXNodeTool.java +++ /dev/null @@ -1,384 +0,0 @@ -/* - * Copyright 2013 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ -package com.netflix.priam.utils; - -import com.google.inject.Inject; -import com.google.inject.Singleton; -import com.netflix.priam.config.IConfiguration; -import org.apache.cassandra.db.ColumnFamilyStoreMBean; -import org.apache.cassandra.repair.messages.RepairOption; -import org.apache.cassandra.tools.NodeProbe; -import org.codehaus.jettison.json.JSONArray; -import org.codehaus.jettison.json.JSONException; -import org.codehaus.jettison.json.JSONObject; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.management.JMX; -import javax.management.MBeanServerConnection; -import javax.management.MalformedObjectNameException; -import javax.management.ObjectName; -import java.io.IOException; -import java.io.PrintStream; -import java.lang.management.ManagementFactory; -import java.lang.management.MemoryUsage; -import java.lang.reflect.Field; -import java.net.InetAddress; -import java.net.UnknownHostException; -import java.text.DecimalFormat; -import java.util.*; -import java.util.Map.Entry; -import java.util.concurrent.ExecutionException; - -/** - * Class to get data out of Cassandra JMX - */ -@Singleton -public class JMXNodeTool extends NodeProbe implements INodeToolObservable { - private static final Logger logger = LoggerFactory.getLogger(JMXNodeTool.class); - private static volatile JMXNodeTool tool = null; - private MBeanServerConnection mbeanServerConn = null; - - private static Set observers = new HashSet(); - - /** - * Hostname and Port to talk to will be same server for now optionally we - * might want the ip to poll. - * - * NOTE: This class shouldn't be a singleton and this shouldn't be cached. - * - * This will work only if cassandra runs. - */ - public JMXNodeTool(String host, int port) throws IOException, InterruptedException { - super(host, port); - } - - public JMXNodeTool(String host, int port, String username, String password) throws IOException, InterruptedException { - super(host, port, username, password); - } - - - @Inject - public JMXNodeTool(IConfiguration config) throws IOException, InterruptedException { - super("localhost", config.getJmxPort()); - } - - /** - * try to create if it is null. - * @throws JMXConnectionException - */ - public static JMXNodeTool instance(IConfiguration config) throws JMXConnectionException { - if (!testConnection()) - tool = connect(config); - return tool; - } - - public static T getRemoteBean(Class clazz, String mbeanName, IConfiguration config, boolean mxbean) throws IOException, MalformedObjectNameException { - if (mxbean) - return ManagementFactory.newPlatformMXBeanProxy(JMXNodeTool.instance(config).mbeanServerConn, mbeanName, clazz); - else - return JMX.newMBeanProxy(JMXNodeTool.instance(config).mbeanServerConn, new ObjectName(mbeanName), clazz); - - } - - /** - * This method will test if you can connect and query something before handing over the connection, - * This is required for our retry logic. - * @return - */ - private static boolean testConnection() { - // connecting first time hence return false. - if (tool == null) - return false; - - try { - MBeanServerConnection serverConn = tool.mbeanServerConn; - if (serverConn == null) { - logger.info("Test connection to remove MBean server failed as there is no connection."); - return false; - } - - if (serverConn.getMBeanCount() < 1) { //If C* is up, it should have at multiple MBeans registered. 
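The testConnection() logic being removed here reduces to a small liveness probe; an equivalent standalone sketch (editorial, using only javax.management types):

    import javax.management.MBeanServerConnection;

    final class JmxProbe {
        static boolean isJmxAlive(MBeanServerConnection conn) {
            try {
                // a live Cassandra JVM registers many MBeans; any positive count means "up"
                return conn != null && conn.getMBeanCount() >= 1;
            } catch (Exception e) {
                return false; // broken or never-established connection
            }
        }
    }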
- logger.info("Test connection to remove MBean server failed as there is no registered MBeans."); - return false; - } - } catch (Throwable ex) { - SystemUtils.closeQuietly(tool); - logger.error("Exception while checking JMX connection to C*, msg: {}", ex.getLocalizedMessage()); - return false; - } - return true; - } - - /** - * A means to clean up existing and recreate the JMX connection to the Cassandra process. - * @return the new connection. - */ - public static synchronized JMXNodeTool createNewConnection(final IConfiguration config) throws JMXConnectionException { - return createConnection(config); - } - - public static synchronized JMXNodeTool connect(final IConfiguration config) throws JMXConnectionException { - //lets make sure some other monitor didn't sneak in the recreated the connection already - if (!testConnection()) { - - if (tool != null) { - try { - tool.close(); //Ensure we properly close any existing (even if it's corrupted) connection to the remote jmx agent - } catch (IOException e) { - logger.warn("Exception performing house cleaning -- closing current connection to jmx remote agent. Msg: {}", e.getLocalizedMessage(), e); - } - } - - } else { - //Someone beat you and already created the connection, nothing you need to do.. - return tool; - } - - return createConnection(config); - } - - private static JMXNodeTool createConnection(final IConfiguration config) throws JMXConnectionException { - // If Cassandra is started then only start the monitoring - if (!CassandraMonitor.hasCassadraStarted()) { - String exceptionMsg = "Cannot perform connection to remove jmx agent as Cassandra has not yet started, check back again later"; - logger.debug(exceptionMsg); - throw new JMXConnectionException(exceptionMsg); - } - - if (tool != null) { //lets make sure we properly close any existing (even if it's corrupted) connection to the remote jmx agent - try { - tool.close(); - } catch (IOException e) { - logger.warn("Exception performing house cleaning -- closing current connection to jmx remote agent. Msg: {}", e.getLocalizedMessage(), e); - } - } - - try { - - tool = new BoundedExponentialRetryCallable() { - @Override - public JMXNodeTool retriableCall() throws Exception { - JMXNodeTool nodetool; - if ((config.getJmxUsername() == null || config.getJmxUsername().isEmpty()) && - (config.getJmxPassword() == null || config.getJmxPassword().isEmpty())) { - nodetool = new JMXNodeTool("localhost", config.getJmxPort()); - } - else { - nodetool = new JMXNodeTool("localhost", config.getJmxPort(), config.getJmxUsername(), config.getJmxPassword()); - } - - Field fields[] = NodeProbe.class.getDeclaredFields(); - for (int i = 0; i < fields.length; i++) { - if (!fields[i].getName().equals("mbeanServerConn")) - continue; - fields[i].setAccessible(true); - nodetool.mbeanServerConn = (MBeanServerConnection) fields[i].get(nodetool); - } - - return nodetool; - } - }.call(); - - } catch (Exception e) { - logger.error(e.getMessage(), e); - throw new JMXConnectionException(e.getMessage()); - } - - logger.info("Connected to remote jmx agent, will notify interested parties!"); - Iterator it = observers.iterator(); - while (it.hasNext()) { - INodeToolObserver observer = it.next(); - observer.nodeToolHasChanged(tool); - } - - return tool; - } - - /** - * You must do the compaction before running this to remove the duplicate - * tokens out of the server. TODO code it. 
- */ - @SuppressWarnings("unchecked") - public JSONObject estimateKeys() throws JSONException { - Iterator> it = super.getColumnFamilyStoreMBeanProxies(); - JSONObject object = new JSONObject(); - while (it.hasNext()) { - Entry entry = it.next(); - object.put("keyspace", entry.getKey()); - object.put("column_family", entry.getValue().getColumnFamilyName()); - object.put("estimated_size", entry.getValue().estimateKeys()); - } - return object; - } - - @SuppressWarnings("unchecked") - public JSONObject info() throws JSONException { - JSONObject object = new JSONObject(); - object.put("gossip_active", isInitialized()); - object.put("thrift_active", isThriftServerRunning()); - object.put("token", getTokens().toString()); - object.put("load", getLoadString()); - object.put("generation_no", getCurrentGenerationNumber()); - object.put("uptime", getUptime() / 1000); - MemoryUsage heapUsage = getHeapMemoryUsage(); - double memUsed = (double) heapUsage.getUsed() / (1024 * 1024); - double memMax = (double) heapUsage.getMax() / (1024 * 1024); - object.put("heap_memory_mb", memUsed + "/" + memMax); - object.put("data_center", getDataCenter()); - object.put("rack", getRack()); - return object; - } - - @SuppressWarnings("unchecked") - public JSONArray ring(String keyspace) throws JSONException { - JSONArray ring = new JSONArray(); - Map tokenToEndpoint = getTokenToEndpointMap(); - List sortedTokens = new ArrayList(tokenToEndpoint.keySet()); - - Collection liveNodes = getLiveNodes(); - Collection deadNodes = getUnreachableNodes(); - Collection joiningNodes = getJoiningNodes(); - Collection leavingNodes = getLeavingNodes(); - Collection movingNodes = getMovingNodes(); - Map loadMap = getLoadMap(); - - String format = "%-16s%-12s%-12s%-7s%-8s%-16s%-20s%-44s%n"; - - // Calculate per-token ownership of the ring - Map ownerships; - try { - ownerships = effectiveOwnership(keyspace); - } catch (IllegalStateException ex) { - ownerships = getOwnership(); - } - - for (String token : sortedTokens) { - String primaryEndpoint = tokenToEndpoint.get(token); - String dataCenter; - try { - dataCenter = getEndpointSnitchInfoProxy().getDatacenter(primaryEndpoint); - } catch (UnknownHostException e) { - dataCenter = "Unknown"; - } - String rack; - try { - rack = getEndpointSnitchInfoProxy().getRack(primaryEndpoint); - } catch (UnknownHostException e) { - rack = "Unknown"; - } - String status = liveNodes.contains(primaryEndpoint) - ? "Up" - : deadNodes.contains(primaryEndpoint) - ? "Down" - : "?"; - - String state = "Normal"; - - if (joiningNodes.contains(primaryEndpoint)) - state = "Joining"; - else if (leavingNodes.contains(primaryEndpoint)) - state = "Leaving"; - else if (movingNodes.contains(primaryEndpoint)) - state = "Moving"; - - String load = loadMap.containsKey(primaryEndpoint) - ? loadMap.get(primaryEndpoint) - : "?"; - String owns = new DecimalFormat("##0.00%").format(ownerships.get(token) == null ? 
0.0F : ownerships.get(token)); - ring.put(createJson(primaryEndpoint, dataCenter, rack, status, state, load, owns, token)); - } - return ring; - } - - private JSONObject createJson(String primaryEndpoint, String dataCenter, String rack, String status, String state, String load, String owns, String token) throws JSONException { - JSONObject object = new JSONObject(); - object.put("endpoint", primaryEndpoint); - object.put("dc", dataCenter); - object.put("rack", rack); - object.put("status", status); - object.put("state", state); - object.put("load", load); - object.put("owns", owns); - object.put("token", token); - return object; - } - - public void repair(boolean isSequential, boolean localDataCenterOnly) throws IOException, ExecutionException, InterruptedException { - repair(isSequential, localDataCenterOnly, false); - } - - public void repair(boolean isSequential, boolean localDataCenterOnly, boolean primaryRange) throws IOException, ExecutionException, InterruptedException { - Map repairOptions = new HashMap<>(); - repairOptions.put(RepairOption.PARALLELISM_KEY, Boolean.toString(!isSequential)); - repairOptions.put(RepairOption.PRIMARY_RANGE_KEY, Boolean.toString(primaryRange)); - if (localDataCenterOnly) - repairOptions.put(RepairOption.DATACENTERS_KEY, getDataCenter()); - - PrintStream printStream = new PrintStream("repair.log"); - - for (String keyspace : getKeyspaces()) - repairAsync(printStream, keyspace, repairOptions); - } - - public void cleanup() throws IOException, ExecutionException, InterruptedException { - for (String keyspace : getKeyspaces()) - forceKeyspaceCleanup(0, keyspace); - } - - public void refresh(List keyspaces) throws IOException, ExecutionException, InterruptedException { - Iterator> it = super.getColumnFamilyStoreMBeanProxies(); - while (it.hasNext()) { - Entry entry = it.next(); - if (keyspaces.contains(entry.getKey())) { - logger.info("Refreshing {} {}", entry.getKey(), entry.getValue().getColumnFamilyName()); - loadNewSSTables(entry.getKey(), entry.getValue().getColumnFamilyName()); - } - } - } - - @Override - public void close() throws IOException { - synchronized (JMXNodeTool.class) { - tool = null; - super.close(); - } - } - - /** - * @param observer to add to list of internal observers. This behavior is thread-safe. - */ - @Override - public void addObserver(INodeToolObserver observer) { - if (observer == null) - throw new NullPointerException("Cannot not observer."); - synchronized (observers) { - observers.add(observer); //if observer exist, it's a noop - } - - } - - /** - * @param observer to be removed; behavior is thread-safe. - */ - @Override - public void deleteObserver(INodeToolObserver observer) { - synchronized (observers) { - observers.remove(observer); - } - } -} \ No newline at end of file diff --git a/priam/src/main/java/com/netflix/priam/utils/MaxSizeHashMap.java b/priam/src/main/java/com/netflix/priam/utils/MaxSizeHashMap.java index 8cf81f206..0da65266a 100644 --- a/priam/src/main/java/com/netflix/priam/utils/MaxSizeHashMap.java +++ b/priam/src/main/java/com/netflix/priam/utils/MaxSizeHashMap.java @@ -20,12 +20,10 @@ import java.util.LinkedHashMap; import java.util.Map; -/** - * Created by aagrawal on 7/11/17. - */ +/** Created by aagrawal on 7/11/17. */ /* - Limit the size of the hashmap using FIFO algorithm. - */ +Limit the size of the hashmap using FIFO algorithm. 
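The MaxSizeHashMap hunk below keeps its FIFO bound in LinkedHashMap.removeEldestEntry; a behavioral sketch (editorial, assuming the class's existing single-int constructor):

    Map<String, Integer> recent = new MaxSizeHashMap<>(2); // hold at most 2 entries, insertion-ordered
    recent.put("a", 1);
    recent.put("b", 2);
    recent.put("c", 3); // size() > maxSize -> removeEldestEntry evicts "a"
    // recent now holds {b=2, c=3}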
+*/ public class MaxSizeHashMap extends LinkedHashMap { private final int maxSize; @@ -38,4 +36,3 @@ protected boolean removeEldestEntry(Map.Entry eldest) { return size() > maxSize; } } - diff --git a/priam/src/main/java/com/netflix/priam/utils/RetryableCallable.java b/priam/src/main/java/com/netflix/priam/utils/RetryableCallable.java index 105345c9d..f53f3472d 100644 --- a/priam/src/main/java/com/netflix/priam/utils/RetryableCallable.java +++ b/priam/src/main/java/com/netflix/priam/utils/RetryableCallable.java @@ -16,17 +16,16 @@ */ package com.netflix.priam.utils; +import java.util.concurrent.Callable; +import java.util.concurrent.CancellationException; import org.apache.commons.lang3.exception.ExceptionUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.concurrent.Callable; -import java.util.concurrent.CancellationException; - public abstract class RetryableCallable implements Callable { private static final Logger logger = LoggerFactory.getLogger(RetryableCallable.class); private static final int DEFAULT_NUMBER_OF_RETRIES = 15; - public static final long DEFAULT_WAIT_TIME = 100; + private static final long DEFAULT_WAIT_TIME = 100; private int retrys; private long waitTime; @@ -72,4 +71,4 @@ public T call() throws Exception { protected void forEachExecution() { // do nothing by default. } -} \ No newline at end of file +} diff --git a/priam/src/main/java/com/netflix/priam/utils/Sleeper.java b/priam/src/main/java/com/netflix/priam/utils/Sleeper.java index b94e3ad42..4d864bdf2 100644 --- a/priam/src/main/java/com/netflix/priam/utils/Sleeper.java +++ b/priam/src/main/java/com/netflix/priam/utils/Sleeper.java @@ -18,9 +18,7 @@ import com.google.inject.ImplementedBy; -/** - * An abstraction to {@link Thread#sleep(long)} so we can mock it in tests. - */ +/** An abstraction to {@link Thread#sleep(long)} so we can mock it in tests. 
*/ @ImplementedBy(ThreadSleeper.class) public interface Sleeper { void sleep(long waitTimeMs) throws InterruptedException; diff --git a/priam/src/main/java/com/netflix/priam/utils/SystemUtils.java b/priam/src/main/java/com/netflix/priam/utils/SystemUtils.java index 87bfd2a2f..d64730203 100644 --- a/priam/src/main/java/com/netflix/priam/utils/SystemUtils.java +++ b/priam/src/main/java/com/netflix/priam/utils/SystemUtils.java @@ -20,18 +20,15 @@ import com.google.common.hash.HashCode; import com.google.common.hash.Hashing; import com.google.common.io.Files; -import org.apache.commons.codec.binary.Base64; -import org.apache.commons.io.FileUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.management.remote.JMXConnector; import java.io.*; import java.net.HttpURLConnection; import java.net.URL; import java.security.MessageDigest; import java.util.List; - +import org.apache.commons.codec.binary.Base64; +import org.apache.commons.io.FileUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class SystemUtils { private static final Logger logger = LoggerFactory.getLogger(SystemUtils.class); @@ -48,9 +45,8 @@ public static String getDataFromUrl(String url) { byte[] b = new byte[2048]; ByteArrayOutputStream bos = new ByteArrayOutputStream(); DataInputStream d = new DataInputStream((FilterInputStream) conn.getContent()); - int c = 0; - while ((c = d.read(b, 0, b.length)) != -1) - bos.write(b, 0, c); + int c; + while ((c = d.read(b, 0, b.length)) != -1) bos.write(b, 0, c); String return_ = new String(bos.toByteArray(), Charsets.UTF_8); logger.info("Calling URL API: {} returns: {}", url, return_); conn.disconnect(); @@ -58,30 +54,24 @@ public static String getDataFromUrl(String url) { } catch (Exception ex) { throw new RuntimeException(ex); } - } - /** - * delete all the files/dirs in the given Directory but dont delete the dir - * itself. + * delete all the files/dirs in the given Directory but do not delete the dir itself. + * + * @param dirPath The directory path where all the child directories exist. + * @param childdirs List of child directories to be cleaned up in the dirPath + * @throws IOException If there is any error encountered during cleanup. */ public static void cleanupDir(String dirPath, List childdirs) throws IOException { - if (childdirs == null || childdirs.size() == 0) - FileUtils.cleanDirectory(new File(dirPath)); + if (childdirs == null || childdirs.size() == 0) FileUtils.cleanDirectory(new File(dirPath)); else { - for (String cdir : childdirs) - FileUtils.cleanDirectory(new File(dirPath + "/" + cdir)); + for (String cdir : childdirs) FileUtils.cleanDirectory(new File(dirPath + "/" + cdir)); } } - public static void createDirs(String location) { - File dirFile = new File(location); - if (dirFile.exists() && dirFile.isFile()) { - dirFile.delete(); - dirFile.mkdirs(); - } else if (!dirFile.exists()) - dirFile.mkdirs(); + public static void createDirs(String location) throws IOException { + FileUtils.forceMkdir(new File(location)); } public static byte[] md5(byte[] buf) { @@ -95,7 +85,10 @@ public static byte[] md5(byte[] buf) { } /** - * Get a Md5 string which is similar to OS Md5sum + * Calculate the MD5 hashsum of the given file. + * + * @param file File for which md5 checksum should be calculated. 
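One subtlety in the SystemUtils.toHex() cleanup a little further on: Integer.toHexString sign-extends a negative byte to a 32-bit value, which is why the method pads one-character results and keeps only the tail of eight-character ones (editorial illustration):

    Integer.toHexString((byte) 0x0a); // "a"        -> padded with a leading "0"
    Integer.toHexString((byte) 0xff); // "ffffffff" -> sign-extended; only the last two chars are kept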
+ * @return Get a Md5 string which is similar to OS Md5sum */ public static String md5(File file) { try { @@ -107,9 +100,9 @@ public static String md5(File file) { } public static String toHex(byte[] digest) { - StringBuffer sb = new StringBuffer(digest.length * 2); - for (int i = 0; i < digest.length; i++) { - String hex = Integer.toHexString(digest[i]); + StringBuilder sb = new StringBuilder(digest.length * 2); + for (byte aDigest : digest) { + String hex = Integer.toHexString(aDigest); if (hex.length() == 1) { sb.append("0"); } else if (hex.length() == 8) { @@ -124,60 +117,4 @@ public static String toBase64(byte[] md5) { byte encoded[] = Base64.encodeBase64(md5, false); return new String(encoded); } - - public static void closeQuietly(JMXNodeTool tool) { - try { - tool.close(); - } catch (Exception e) { - logger.warn("failed to close jxm node tool", e); - } - - } - - public static void closeQuietly(JMXConnector jmc) { - try { - jmc.close(); - } catch (Exception e) { - logger.warn("failed to close JMXConnectorMgr", e); - } - } - - /* - @param absolute path to input file - @return handle to input file - */ - public static BufferedReader readFile(String absPathToFile) throws IOException { - InputStream is = new FileInputStream(absPathToFile); - InputStreamReader isr = new InputStreamReader(is); - return new BufferedReader(isr); - } - - /* - Write the "line" to the file. If file does not exist, it's created. if file exists, its content will be overwritten with the input. - @param absolute path to file - @param input line - */ - public static void writeToFile(String filename, String line) { - File f = new File(filename); - PrintWriter pw = null; - FileWriter fw = null; - try { - if (!f.exists()) { - f.createNewFile(); - logger.info("File created, absolute path: {}", f.getAbsolutePath()); - } - - fw = new FileWriter(f, false); - pw = new PrintWriter(fw); - pw.print(line); - - } catch (IOException e) { - throw new IllegalStateException("Exception processing file: " + filename, e); - } finally { - if (pw != null) { - pw.flush(); - pw.close(); - } - } - } } diff --git a/priam/src/main/java/com/netflix/priam/utils/ThreadSleeper.java b/priam/src/main/java/com/netflix/priam/utils/ThreadSleeper.java index 412179817..57f4f1b71 100644 --- a/priam/src/main/java/com/netflix/priam/utils/ThreadSleeper.java +++ b/priam/src/main/java/com/netflix/priam/utils/ThreadSleeper.java @@ -16,9 +16,7 @@ */ package com.netflix.priam.utils; -/** - * Sleeper impl that delegates to Thread.sleep - */ +/** Sleeper impl that delegates to Thread.sleep */ public class ThreadSleeper implements Sleeper { @Override public void sleep(long waitTimeMs) throws InterruptedException { @@ -29,10 +27,7 @@ public void sleepQuietly(long waitTimeMs) { try { sleep(waitTimeMs); } catch (InterruptedException e) { - //no-op + // no-op } - } - - -} \ No newline at end of file +} diff --git a/priam/src/main/java/com/netflix/priam/utils/TokenManager.java b/priam/src/main/java/com/netflix/priam/utils/TokenManager.java index a3f5debbb..d9276a4da 100644 --- a/priam/src/main/java/com/netflix/priam/utils/TokenManager.java +++ b/priam/src/main/java/com/netflix/priam/utils/TokenManager.java @@ -21,8 +21,8 @@ import com.google.common.collect.Ordering; import com.google.inject.Inject; import com.netflix.priam.config.IConfiguration; - import java.math.BigInteger; +import java.util.Collections; import java.util.List; public class TokenManager implements ITokenManager { @@ -31,12 +31,11 @@ public class TokenManager implements ITokenManager { public static 
final BigInteger MINIMUM_TOKEN_MURMUR3 = new BigInteger("-2").pow(63); public static final BigInteger MAXIMUM_TOKEN_MURMUR3 = new BigInteger("2").pow(63); - private final BigInteger minimumToken; private final BigInteger maximumToken; private final BigInteger tokenRangeSize; - private IConfiguration config; + private final IConfiguration config; @Inject public TokenManager(IConfiguration config) { @@ -53,13 +52,14 @@ public TokenManager(IConfiguration config) { } /** - * Calculate a token for the given position, evenly spaced from other size-1 nodes. See + * Calculate a token for the given position, evenly spaced from other size-1 nodes. See * http://wiki.apache.org/cassandra/Operations. * * @param size number of slots by which the token space will be divided * @param position slot number, multiplier * @param offset added to token - * @return MAXIMUM_TOKEN / size * position + offset, if <= MAXIMUM_TOKEN, otherwise wrap around the MINIMUM_TOKEN + * @return MAXIMUM_TOKEN / size * position + offset, if <= MAXIMUM_TOKEN, otherwise wrap around + * the MINIMUM_TOKEN */ @VisibleForTesting BigInteger initialToken(int size, int position, int offset) { @@ -70,30 +70,13 @@ BigInteger initialToken(int size, int position, int offset) { * unit test failures. */ Preconditions.checkArgument(position >= 0, "position must be >= 0"); - return tokenRangeSize.divide(BigInteger.valueOf(size)) + return tokenRangeSize + .divide(BigInteger.valueOf(size)) .multiply(BigInteger.valueOf(position)) .add(BigInteger.valueOf(offset)) .add(minimumToken); } - /** - * Creates a token given the following parameter - * - * @param my_slot - * -- Slot where this instance has to be. - * @param rac_count - * -- Rac count is the numeber of RAC's - * @param rac_size - * -- number of memberships in the rac - * @param region - * -- name of the DC where it this token is created. - */ - @Override - public String createToken(int my_slot, int rac_count, int rac_size, String region) { - int regionCount = rac_count * rac_size; - return initialToken(regionCount, my_slot, regionOffset(region)).toString(); - } - @Override public String createToken(int my_slot, int totalCount, String region) { return initialToken(totalCount, my_slot, regionOffset(region)).toString(); @@ -103,20 +86,23 @@ public String createToken(int my_slot, int totalCount, String region) { public BigInteger findClosestToken(BigInteger tokenToSearch, List tokenList) { Preconditions.checkArgument(!tokenList.isEmpty(), "token list must not be empty"); List sortedTokens = Ordering.natural().sortedCopy(tokenList); - int index = Ordering.natural().binarySearch(sortedTokens, tokenToSearch); + int index = Collections.binarySearch(sortedTokens, tokenToSearch, Ordering.natural()); if (index < 0) { int i = Math.abs(index) - 1; - if ((i >= sortedTokens.size()) || (i > 0 && sortedTokens.get(i).subtract(tokenToSearch) - .compareTo(tokenToSearch.subtract(sortedTokens.get(i - 1))) > 0)) - --i; + if ((i >= sortedTokens.size()) + || (i > 0 + && sortedTokens + .get(i) + .subtract(tokenToSearch) + .compareTo( + tokenToSearch.subtract(sortedTokens.get(i - 1))) + > 0)) --i; return sortedTokens.get(i); } return sortedTokens.get(index); } - /** - * Create an offset to add to token values by hashing the region name. - */ + /** Create an offset to add to token values by hashing the region name. 
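Putting the token arithmetic together (editorial sketch; tokenRangeSize is taken to be MAXIMUM minus MINIMUM for the active partitioner): slot i of n receives tokenRangeSize / n * i + offset + minimumToken, so four evenly spaced Murmur3 slots with a zero offset land at -2^63, -2^62, 0, and 2^62:

    BigInteger range = TokenManager.MAXIMUM_TOKEN_MURMUR3
            .subtract(TokenManager.MINIMUM_TOKEN_MURMUR3);   // 2^64
    BigInteger slot2 = range.divide(BigInteger.valueOf(4))
            .multiply(BigInteger.valueOf(2))
            .add(TokenManager.MINIMUM_TOKEN_MURMUR3);        // 0, the third of four slots
    // regionOffset(region) below then shifts every slot by Math.abs(region.hashCode())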
*/ @Override public int regionOffset(String region) { return Math.abs(region.hashCode()); diff --git a/priam/src/main/resources/Priam.properties b/priam/src/main/resources/Priam.properties index a6c72ddca..b392c2fde 100644 --- a/priam/src/main/resources/Priam.properties +++ b/priam/src/main/resources/Priam.properties @@ -1,55 +1,4 @@ -priam.authenticator=org.apache.cassandra.auth.AllowAllAuthenticator -priam.authorizer=org.apache.cassandra.auth.AllowAllAuthorizer -priam.backup.chunksizemb=10 -priam.backup.commitlog.enable= -priam.backup.commitlog.location= -priam.backup.hour=12 -priam.backup.incremental.enable= -priam.backup.racs= -priam.backup.retention= -priam.backup.threads=2 -priam.bootcluster= -priam.cache.location=/var/lib/cassandra/saved_caches -priam.cass.home=/mnt/cassandra -priam.cass.manual.start.enable= -priam.cass.process= -priam.cass.startscript=/mnt/cassandra/bin/cassandra -priam.cass.stopscript=/mnt/cassandra/bin/cassandra priam.clustername=cass_cluster -priam.commitlog.location=/var/lib/cassandra/commitlog -priam.compaction.throughput= -priam.data.location=/var/lib/cassandra/data priam.direct.memory.size.m1.large=1G -priam.endpoint_snitch=org.apache.cassandra.locator.Ec2Snitch priam.heap.newgen.size.m1.large=2G priam.heap.size.m1.large=4G -priam.hint.delay= -priam.hint.window= -priam.jmx.port=7199 -priam.keyCache.count= -priam.keyCache.size= -priam.localbootstrap.enable= -priam.memory.compaction.limit= -priam.memtabletotalspace= -priam.multiregion.enable=false -priam.multithreaded.compaction= -priam.partitioner=org.apache.cassandra.dht.RandomPartitioner -priam.restore.closesttoken= -priam.restore.keyspaces= -priam.restore.prefix= -priam.restore.snapshot= -priam.restore.threads=8 -priam.rowCache.count= -priam.rowCache.size= -priam.s3.base_dir=mdo-backup -priam.s3.bucket=mdo-cassandra-archive -priam.seed.provider=com.netflix.priam.cassandra.extensions.NFSeedProvider -priam.ssl.storage.port=7001 -priam.storage.port=7000 -priam.streaming.throughput.mb= -priam.target.columnfamily= -priam.target.keyspace= -priam.thrift.port=9160 -priam.upload.throttle= -priam.yamlLocation= -priam.zones.available= diff --git a/priam/src/main/resources/incr-restore-cassandra.yaml b/priam/src/main/resources/incr-restore-cassandra.yaml index d0d4688b0..f65e88363 100755 --- a/priam/src/main/resources/incr-restore-cassandra.yaml +++ b/priam/src/main/resources/incr-restore-cassandra.yaml @@ -333,65 +333,6 @@ native_transport_port: 9042 # native_transport_min_threads: 16 # native_transport_max_threads: 128 - -# Whether to start the thrift rpc server. -start_rpc: true -# The address to bind the Thrift RPC service to -- clients connect -# here. Unlike ListenAddress above, you *can* specify 0.0.0.0 here if -# you want Thrift to listen on all interfaces. -# -# Leaving this blank has the same effect it does for ListenAddress, -# (i.e. it will be based on the configured hostname of the node). -rpc_address: localhost -# port for Thrift to listen for clients on -rpc_port: 9160 - -# enable or disable keepalive on rpc connections -rpc_keepalive: true - -# Cassandra provides three out-of-the-box options for the RPC Server: -# -# sync -> One thread per thrift connection. For a very large number of clients, memory -# will be your limiting factor. On a 64 bit JVM, 128KB is the minimum stack size -# per thread, and that will correspond to your use of virtual memory (but physical memory -# may be limited depending on use of stack space). -# -# hsha -> Stands for "half synchronous, half asynchronous." 
All thrift clients are handled -# asynchronously using a small number of threads that does not vary with the amount -# of thrift clients (and thus scales well to many clients). The rpc requests are still -# synchronous (one thread per active request). -# -# The default is sync because on Windows hsha is about 30% slower. On Linux, -# sync/hsha performance is about the same, with hsha of course using less memory. -# -# Alternatively, can provide your own RPC server by providing the fully-qualified class name -# of an o.a.c.t.TServerFactory that can create an instance of it. -rpc_server_type: sync - -# Uncomment rpc_min|max_thread to set request pool size limits. -# -# Regardless of your choice of RPC server (see above), the number of maximum requests in the -# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync -# RPC server, it also dictates the number of clients that can be connected at all). -# -# The default is unlimited and thus provide no protection against clients overwhelming the server. You are -# encouraged to set a maximum that makes sense for you in production, but do keep in mind that -# rpc_max_threads represents the maximum number of client requests this server may execute concurrently. -# -# rpc_min_threads: 16 -# rpc_max_threads: 2048 - -# uncomment to set socket buffer sizes on rpc connections -# rpc_send_buff_size_in_bytes: -# rpc_recv_buff_size_in_bytes: - -# Frame size for thrift (maximum field length). -thrift_framed_transport_size_in_mb: 15 - -# The max length of a thrift message, including all fields and -# internal thrift overhead. -thrift_max_message_length_in_mb: 16 - # Set to true to have Cassandra create a hard link to each sstable # flushed or streamed locally in a backups/ subdirectory of the # Keyspace data. Removing these links is the operator's @@ -563,58 +504,6 @@ dynamic_snitch_reset_interval_in_ms: 600000 # until the pinned host was 20% worse than the fastest. dynamic_snitch_badness_threshold: 0.1 -# request_scheduler -- Set this to a class that implements -# RequestScheduler, which will schedule incoming client requests -# according to the specific policy. This is useful for multi-tenancy -# with a single Cassandra cluster. -# NOTE: This is specifically for requests from the client and does -# not affect inter node communication. -# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place -# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of -# client requests to a node with a separate queue for each -# request_scheduler_id. The scheduler is further customized by -# request_scheduler_options as described below. -request_scheduler: org.apache.cassandra.scheduler.NoScheduler - -# Scheduler Options vary based on the type of scheduler -# NoScheduler - Has no options -# RoundRobin -# - throttle_limit -- The throttle_limit is the number of in-flight -# requests per client. Requests beyond -# that limit are queued up until -# running requests can complete. -# The value of 80 here is twice the number of -# concurrent_reads + concurrent_writes. -# - default_weight -- default_weight is optional and allows for -# overriding the default which is 1. -# - weights -- Weights are optional and will default to 1 or the -# overridden default_weight. The weight translates into how -# many requests are handled during each turn of the -# RoundRobin, based on the scheduler id. 
-# -# request_scheduler_options: -# throttle_limit: 80 -# default_weight: 5 -# weights: -# Keyspace1: 1 -# Keyspace2: 5 - -# request_scheduler_id -- An identifer based on which to perform -# the request scheduling. Currently the only valid option is keyspace. -# request_scheduler_id: keyspace - -# index_interval controls the sampling of entries from the primrary -# row index in terms of space versus time. The larger the interval, -# the smaller and less effective the sampling will be. In technicial -# terms, the interval coresponds to the number of index entries that -# are skipped between taking each sample. All the sampled entries -# must fit in memory. Generally, a value between 128 and 512 here -# coupled with a large key cache size on CFs results in the best trade -# offs. This value is not often changed, however if you have many -# very small rows (many to an OS page), then increasing this will -# often lower memory usage without a impact on performance. -index_interval: 128 - # Enable or disable inter-node encryption # Default settings are TLS v1, RSA 1024-bit keys (it is imperative that # users generate their own keys) TLS_RSA_WITH_AES_128_CBC_SHA as the cipher diff --git a/priam/src/test/groovy/com.netflix.priam.scheduler/TestFlushTask.groovy b/priam/src/test/groovy/com.netflix.priam.scheduler/TestFlushTask.groovy deleted file mode 100644 index ab8630706..000000000 --- a/priam/src/test/groovy/com.netflix.priam.scheduler/TestFlushTask.groovy +++ /dev/null @@ -1,82 +0,0 @@ -package com.netflix.priam.scheduler - -import com.netflix.priam.config.FakeConfiguration -import com.netflix.priam.cluster.management.Flush -import spock.lang.Specification -import spock.lang.Unroll - -/** - Created by aagrawal on 7/15/17. - */ -@Unroll -class TestFlushTask extends Specification { - - def "Exception for value #flushSchedulerType, #flushCronExpression, #flushInterval"() { - when: - Flush.getTimer(new FlushConfiguration(flushSchedulerType, flushCronExpression, flushInterval)) - - then: - def error = thrown(expectedException) - - where: - flushSchedulerType | flushCronExpression | flushInterval || expectedException - "sdf" | null | null || UnsupportedTypeException - "hour" | null | "2" || IllegalArgumentException - "hour" | "0 0 2 * * ?" | "2" || IllegalArgumentException - "cron" | "abc" | null || IllegalArgumentException - "cron" | "abc" | "daily=2" || IllegalArgumentException - "cron" | null | "daily=2" || IllegalArgumentException - "hour" | null | "hour=2,daily=2" || IllegalArgumentException - } - - def "SchedulerType for value #flushSchedulerType, #flushCronExpression, #flushInterval is null"() { - expect: - Flush.getTimer(new FlushConfiguration(flushSchedulerType, flushCronExpression, flushInterval)) == result - - where: - flushSchedulerType | flushCronExpression | flushInterval || result - "hour" | null | null || null - "cron" | "-1" | null || null - "hour" | "abc" | null || null - "cron" | "-1" | "abc" || null - } - - def "SchedulerType for value #flushSchedulerType, #flushCronExpression, #flushInterval is #result"() { - expect: - Flush.getTimer(new FlushConfiguration(flushSchedulerType, flushCronExpression, flushInterval)).getCronExpression() == result - - where: - flushSchedulerType | flushCronExpression | flushInterval || result - "hour" | null | "daily=2" || "0 0 2 * * ?" - "hour" | null | "hour=2" || "0 2 0/1 * * ?" - "cron" | "0 0 0/1 1/1 * ? *" | null || "0 0 0/1 1/1 * ? *" - "cron" | "0 0 0/1 1/1 * ? *" | "daily=2" || "0 0 0/1 1/1 * ? 
*" - } - - - private class FlushConfiguration extends FakeConfiguration { - private String flushSchedulerType, flushCronExpression, flushInterval - - FlushConfiguration(String flushSchedulerType, String flushCronExpression, String flushInterval) { - this.flushCronExpression = flushCronExpression - this.flushSchedulerType = flushSchedulerType - this.flushInterval = flushInterval - } - - @Override - SchedulerType getFlushSchedulerType() throws UnsupportedTypeException { - return SchedulerType.lookup(flushSchedulerType) - } - - @Override - String getFlushCronExpression() { - return flushCronExpression - } - - @Override - String getFlushInterval() { - return flushInterval - } - } - -} diff --git a/priam/src/test/groovy/com.netflix.priam.scheduler/TestSchedulerType.groovy b/priam/src/test/groovy/com.netflix.priam.scheduler/TestSchedulerType.groovy deleted file mode 100644 index 1e48dbe82..000000000 --- a/priam/src/test/groovy/com.netflix.priam.scheduler/TestSchedulerType.groovy +++ /dev/null @@ -1,52 +0,0 @@ -package com.netflix.priam.scheduler - -/** - * Created by aagrawal on 3/16/17. - * This is used to test SchedulerType with all the values you might get. - */ -import spock.lang.* - -@Unroll -class TestSchedulerType extends Specification{ - - def "Exception for value #schedulerType , #acceptNullorEmpty , #acceptIllegalValue"() { - when: - SchedulerType.lookup(schedulerType, acceptNullorEmpty, acceptIllegalValue) - - then: - def error = thrown(expectedException) - - where: - schedulerType | acceptNullorEmpty | acceptIllegalValue || expectedException - "sdf" | true | false || UnsupportedTypeException - "" | false | true || UnsupportedTypeException - null | false | true || UnsupportedTypeException - - } - - def "SchedulerType for value #schedulerType , #acceptNullorEmpty , #acceptIllegalValue is #result"() { - expect: - SchedulerType.lookup(schedulerType, acceptNullorEmpty, acceptIllegalValue) == result - - where: - schedulerType | acceptNullorEmpty | acceptIllegalValue || result - "hour" | true | true || SchedulerType.HOUR - "Hour" | true | true || SchedulerType.HOUR - "HOUR" | true | true || SchedulerType.HOUR - "hour" | true | false || SchedulerType.HOUR - "Hour" | true | false || SchedulerType.HOUR - "HOUR" | true | false || SchedulerType.HOUR - "hour" | false | false || SchedulerType.HOUR - "Hour" | false | false || SchedulerType.HOUR - "HOUR" | false | false || SchedulerType.HOUR - "hour" | false | true || SchedulerType.HOUR - "Hour" | false | true || SchedulerType.HOUR - "HOUR" | false | true || SchedulerType.HOUR - "" | true | false || null - null | true | false || null - "sdf" | false | true || null - "sdf" | true | true || null - } - - -} \ No newline at end of file diff --git a/priam/src/test/groovy/com.netflix.priam/backup/TestBackupRestoreUtil.groovy b/priam/src/test/groovy/com.netflix.priam/backup/TestBackupRestoreUtil.groovy index a70c02ce6..a196a44ab 100644 --- a/priam/src/test/groovy/com.netflix.priam/backup/TestBackupRestoreUtil.groovy +++ b/priam/src/test/groovy/com.netflix.priam/backup/TestBackupRestoreUtil.groovy @@ -1,3 +1,19 @@ +/* + * Copyright 2017 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ package com.netflix.priam.backup import spock.lang.Specification @@ -8,44 +24,45 @@ import spock.lang.Unroll */ @Unroll class TestBackupRestoreUtil extends Specification { - def "IsFilter for KS #keyspace with configuration #configKeyspaceFilter is #result"() { + def "IsFilter for KS #keyspace and CF #columnfamily with configuration include #configIncludeFilter and exclude #configExcludeFilter is #result"() { expect: - new BackupRestoreUtil(configKeyspaceFilter, configCFFilter).isFiltered(BackupRestoreUtil.DIRECTORYTYPE.KEYSPACE, keyspace, columnfamily) == result + new BackupRestoreUtil(configIncludeFilter, configExcludeFilter).isFiltered(keyspace, columnfamily) == result where: - configKeyspaceFilter | configCFFilter | keyspace | columnfamily || result - "abc" | null | "abc" | null || true - "abc" | "ab.ab" | "ab" | null || false - "abc" | null | "ab" | null || false - "abc,def" | null | "abc" | null || true - "abc,def" | null | "def" | null || true - "abc,def" | null | "ab" | null || false - "abc,def" | null | "df" | null || false - "ab.*" | null | "ab" | null || true - "ab.*,def" | null | "ab" | null || true - "ab.*,de.*" | null | "ab" | null || true - "ab.*,de.*" | null | "abab" | null || true - "ab.*,de.*" | null | "defg" | null || true - null | null | "defg" | null || false + configIncludeFilter | configExcludeFilter | keyspace | columnfamily || result + null | null | "defg" | "gh" || false + "abc.*" | null | "abc" | "cd" || false + "abc.*" | null | "ab" | "cd" || true + null | "abc.de" | "abc" | "def" || false + null | "abc.de" | "abc" | "de" || true + "abc.*,def.*" | null | "abc" | "cd" || false + "abc.*,def.*" | null | "def" | "ab" || false + "abc.*,def.*" | null | "ab" | "cd" || true + "abc.*,def.*" | null | "df" | "ab" || true + null | "abc.de,fg.hi" | "abc" | "def" || false + null | "abc.de,fg.hi" | "abc" | "de" || true + null | "abc.de,fg.hi" | "fg" | "hijk" || false + null | "abc.de,fg.hi" | "fg" | "hi" || true + "abc.*" | "ab.ab" | "ab" | "cd" || true + "abc.*" | "ab.ab" | "ab" | "ab" || true + "abc.*" | "abc.ab" | "abc" | "ab" || true + "abc.*" | "abc.ab" | "abc" | "cd" || false + "abc.cd" | "abc.*" | "abc" | "cd" || true + "abc.*" | "abc.*" | "abc" | "cd" || true + "abc.*,def.*" | "abc.*" | "def" | "ab" || false } - def "IsFilter for CF #columnfamily with configuration #configCFFilter is #result"() { - expect: - new BackupRestoreUtil(configKeyspaceFilter, configCFFilter).isFiltered(BackupRestoreUtil.DIRECTORYTYPE.CF, keyspace, columnfamily) == result - where: - configKeyspaceFilter | configCFFilter | keyspace | columnfamily || result - "abc" | null | "abc" | null || false - "abc" | "ab.ab" | "ks" | "ab" || false - "abc" | "ab.ab" | "ab" | "ab.ab" || true - "abc" | "ab.ab,de.fg" | "ab" | "ab.ab" || true - "abc" | "ab.ab,de.fg" | "de" | "fg" || true - null | "abc.de.*" | "abc" | "def" || true - null | "abc.de.*" | "abc" | "abc.def" || true - null | "abc.de.*,fg.hi.*" | "abc" | "def" || true - null | "abc.de.*,fg.hi.*" | "abc" | "abc.def" || true - null | "abc.de.*,fg.hi.*" | "fg" | "hijk" || true - null | "abc.de.*,fg.hi.*" | "fg" | "fg.hijk" || 
true + def "Expected exception KS #keyspace and CF #columnfamily with configuration include #configIncludeFilter and exclude #configExcludeFilter"() { + when: + new BackupRestoreUtil(configIncludeFilter, configExcludeFilter).isFiltered(keyspace, columnfamily) + then: + thrown(ExcpectedException) + + where: + configIncludeFilter | configExcludeFilter | keyspace | columnfamily || ExcpectedException + null | "def" | "defg" | null || IllegalArgumentException + "abc" | null | null | "cd" || IllegalArgumentException } } diff --git a/priam/src/test/groovy/com.netflix.priam/backup/TestBackupScheduler.groovy b/priam/src/test/groovy/com.netflix.priam/backup/TestBackupScheduler.groovy index bcd49664d..d5baaf0b1 100644 --- a/priam/src/test/groovy/com.netflix.priam/backup/TestBackupScheduler.groovy +++ b/priam/src/test/groovy/com.netflix.priam/backup/TestBackupScheduler.groovy @@ -1,8 +1,22 @@ +/* + * Copyright 2017 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ package com.netflix.priam.backup import com.netflix.priam.config.FakeConfiguration -import com.netflix.priam.scheduler.SchedulerType -import com.netflix.priam.scheduler.UnsupportedTypeException import spock.lang.Specification import spock.lang.Unroll @@ -11,26 +25,22 @@ import spock.lang.Unroll */ @Unroll class TestBackupScheduler extends Specification { - def "IsBackupEnabled for SchedulerType #schedulerType with hour #configHour and CRON #configCRON is #result"() { + def "IsBackupEnabled CRON #configCRON is #result"() { expect: - SnapshotBackup.isBackupEnabled(new BackupConfiguration(schedulerType, configCRON, configHour)) == result + SnapshotBackup.isBackupEnabled(new BackupConfiguration(configCRON)) == result where: - schedulerType | configCRON | configHour || result - "hour" | null | -1 || false - "hour" | "0 0 9 1/1 * ? *" | -1 || false - "hour" | null | 1 || true - "cron" | "-1" | 1 || false - "cron" | "-1" | -1 || false - "cron" | "0 0 9 1/1 * ? *" | -1 || true + configCRON || result + "-1" || false + "0 0 9 1/1 * ? *" || true } def "Exception for illegal value of Snapshot CRON expression , #configCRON"() { when: - SnapshotBackup.isBackupEnabled(new BackupConfiguration("cron", configCRON, 1)) + SnapshotBackup.isBackupEnabled(new BackupConfiguration(configCRON)) then: - def error = thrown(expectedException) + thrown(expectedException) where: configCRON || expectedException @@ -38,40 +48,26 @@ class TestBackupScheduler extends Specification { "0 9 1/1 * ? *"|| Exception } - def "Validate CRON for backup for SchedulerType #schedulerType with hour #configHour and CRON #configCRON is #result"() { + def "Validate CRON for backup CRON #configCRON is #result"() { expect: - SnapshotBackup.getTimer(new BackupConfiguration(schedulerType, configCRON, configHour)).cronExpression == result + SnapshotBackup.getTimer(new BackupConfiguration(configCRON)).cronExpression == result where: - schedulerType | configCRON | configHour || result - "hour" | null | 1 || "0 1 1 * * ?" - "cron" | "0 0 9 1/1 * ? 
*" | -1 || "0 0 9 1/1 * ? *" + configCRON || result + "0 0 9 1/1 * ? *" || "0 0 9 1/1 * ? *" } private class BackupConfiguration extends FakeConfiguration { - private String backupSchedulerType, backupCronExpression - private int backupHour + private String backupCronExpression - BackupConfiguration(String backupSchedulerType, String backupCronExpression, int backupHour) { + BackupConfiguration(String backupCronExpression) { this.backupCronExpression = backupCronExpression - this.backupSchedulerType = backupSchedulerType - this.backupHour = backupHour - } - - @Override - SchedulerType getBackupSchedulerType() throws UnsupportedTypeException { - return SchedulerType.lookup(backupSchedulerType) } @Override String getBackupCronExpression() { return backupCronExpression } - - @Override - int getBackupHour() { - return backupHour - } } } diff --git a/priam/src/test/groovy/com/netflix/priam/cluser/management/TestCompaction.groovy b/priam/src/test/groovy/com/netflix/priam/cluser/management/TestCompaction.groovy index 273889ed0..90fe7db80 100644 --- a/priam/src/test/groovy/com/netflix/priam/cluser/management/TestCompaction.groovy +++ b/priam/src/test/groovy/com/netflix/priam/cluser/management/TestCompaction.groovy @@ -1,11 +1,27 @@ +/* + * Copyright 2017 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ package com.netflix.priam.cluser.management import com.google.inject.Guice -import com.netflix.priam.config.FakeConfiguration import com.netflix.priam.backup.BRTestModule import com.netflix.priam.cluster.management.Compaction -import com.netflix.priam.defaultimpl.CassandraOperations -import com.netflix.priam.utils.CassandraMonitor +import com.netflix.priam.config.FakeConfiguration +import com.netflix.priam.connection.CassandraOperations +import com.netflix.priam.health.CassandraMonitor import mockit.Mock import mockit.MockUp import spock.lang.Shared @@ -132,8 +148,9 @@ class TestCompaction extends Specification { 1 || 0 } - private int concurrentRuns(int size) { - CassandraMonitor.setIsCassadraStarted(); + + private static int concurrentRuns(int size) { + CassandraMonitor.setIsCassadraStarted() ExecutorService threads = Executors.newFixedThreadPool(size) List> torun = new ArrayList<>(size) for (int i = 0; i < size; i++) { @@ -156,7 +173,7 @@ class TestCompaction extends Specification { //We expect exception here. try{ fut.get() - }catch(Exception e){ + }catch(Exception ignored){ noOfBadRun++ } } diff --git a/priam/src/test/groovy/com/netflix/priam/cluser/management/TestFlushTask.groovy b/priam/src/test/groovy/com/netflix/priam/cluser/management/TestFlushTask.groovy new file mode 100644 index 000000000..2fa8766fb --- /dev/null +++ b/priam/src/test/groovy/com/netflix/priam/cluser/management/TestFlushTask.groovy @@ -0,0 +1,73 @@ +/* + * Copyright 2018 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.priam.cluser.management + +import com.netflix.priam.cluster.management.Flush +import com.netflix.priam.config.FakeConfiguration +import spock.lang.Specification +import spock.lang.Unroll + +/** + Created by aagrawal on 7/15/17. + */ +@Unroll +class TestFlushTask extends Specification { + def "Exception for value #flushCronExpression"() { + when: + Flush.getTimer(new FlushConfiguration(flushCronExpression)) + + then: + thrown(expectedException) + + where: + flushCronExpression || expectedException + "abc" || IllegalArgumentException + null || IllegalArgumentException + } + + def "SchedulerType for value #flushCronExpression is null"() { + expect: + Flush.getTimer(new FlushConfiguration(flushCronExpression)) == result + + where: + flushCronExpression || result + "-1" || null + } + + def "SchedulerType for value #flushCronExpression is #result"() { + expect: + Flush.getTimer(new FlushConfiguration(flushCronExpression)).getCronExpression() == result + + where: + flushCronExpression || result + "0 0 0/1 1/1 * ? *" || "0 0 0/1 1/1 * ? *" + } + + private class FlushConfiguration extends FakeConfiguration { + private String flushCronExpression + + FlushConfiguration(String flushCronExpression) { + this.flushCronExpression = flushCronExpression + } + + @Override + String getFlushCronExpression() { + return flushCronExpression + } + } +} \ No newline at end of file diff --git a/priam/src/test/java/com/netflix/priam/TestModule.java b/priam/src/test/java/com/netflix/priam/TestModule.java index 390411303..0f4ceffbe 100644 --- a/priam/src/test/java/com/netflix/priam/TestModule.java +++ b/priam/src/test/java/com/netflix/priam/TestModule.java @@ -32,6 +32,8 @@ import com.netflix.priam.identity.FakePriamInstanceFactory; import com.netflix.priam.identity.IMembership; import com.netflix.priam.identity.IPriamInstanceFactory; +import com.netflix.priam.identity.config.FakeInstanceInfo; +import com.netflix.priam.identity.config.InstanceInfo; import com.netflix.priam.utils.FakeSleeper; import com.netflix.priam.utils.Sleeper; import com.netflix.spectator.api.DefaultRegistry; @@ -40,21 +42,23 @@ import org.quartz.SchedulerFactory; import org.quartz.impl.StdSchedulerFactory; - @Ignore -public class TestModule extends AbstractModule -{ +public class TestModule extends AbstractModule { @Override - protected void configure() - { - bind(IConfiguration.class).toInstance( - new FakeConfiguration(FakeConfiguration.FAKE_REGION, "fake-app", "az1", "fakeInstance1")); + protected void configure() { + bind(IConfiguration.class).toInstance(new FakeConfiguration("fake-app")); bind(IBackupRestoreConfig.class).to(FakeBackupRestoreConfig.class); - bind(IPriamInstanceFactory.class).to(FakePriamInstanceFactory.class); + bind(InstanceInfo.class) + .toInstance(new FakeInstanceInfo("fakeInstance1", "az1", "us-east-1")); + + bind(IPriamInstanceFactory.class).to(FakePriamInstanceFactory.class).in(Scopes.SINGLETON); bind(SchedulerFactory.class).to(StdSchedulerFactory.class).in(Scopes.SINGLETON); - bind(IMembership.class).toInstance(new FakeMembership( - 
ImmutableList.of("fakeInstance1", "fakeInstance2", "fakeInstance3"))); + bind(IMembership.class) + .toInstance( + new FakeMembership( + ImmutableList.of( + "fakeInstance1", "fakeInstance2", "fakeInstance3"))); bind(ICredential.class).to(FakeCredentials.class).in(Scopes.SINGLETON); bind(IBackupFileSystem.class).to(NullBackupFileSystem.class); bind(Sleeper.class).to(FakeSleeper.class); diff --git a/priam/src/test/java/com/netflix/priam/aws/TestRemoteBackupPath.java b/priam/src/test/java/com/netflix/priam/aws/TestRemoteBackupPath.java new file mode 100644 index 000000000..72ede456e --- /dev/null +++ b/priam/src/test/java/com/netflix/priam/aws/TestRemoteBackupPath.java @@ -0,0 +1,317 @@ +/* + * Copyright 2018 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.priam.aws; + +import com.google.inject.Guice; +import com.google.inject.Provider; +import com.netflix.priam.backup.AbstractBackupPath; +import com.netflix.priam.backup.AbstractBackupPath.BackupFileType; +import com.netflix.priam.backup.BRTestModule; +import com.netflix.priam.cryptography.CryptographyAlgorithm; +import com.netflix.priam.utils.DateUtil; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.time.Instant; +import org.junit.Assert; +import org.junit.Test; +import org.junit.jupiter.api.Assertions; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** Created by aagrawal on 11/23/18. */ +public class TestRemoteBackupPath { + private static final Logger logger = LoggerFactory.getLogger(TestRemoteBackupPath.class); + private Provider pathFactory; + + public TestRemoteBackupPath() { + pathFactory = + Guice.createInjector(new BRTestModule()).getProvider(AbstractBackupPath.class); + } + + @Test + public void testV1BackupPathsSST() { + Path path = + Paths.get("target/data", "keyspace1", "columnfamily1", "backup", "mc-1234-Data.db"); + AbstractBackupPath abstractBackupPath = pathFactory.get(); + abstractBackupPath.parseLocal(path.toFile(), BackupFileType.SST); + + // Verify parse local + Assert.assertEquals( + 0, abstractBackupPath.getLastModified().toEpochMilli()); // File do not exist. + Assert.assertEquals("keyspace1", abstractBackupPath.getKeyspace()); + Assert.assertEquals("columnfamily1", abstractBackupPath.getColumnFamily()); + Assert.assertEquals(BackupFileType.SST, abstractBackupPath.getType()); + Assert.assertEquals(path.toFile(), abstractBackupPath.getBackupFile()); + Assert.assertEquals( + 0, + abstractBackupPath + .getTime() + .toInstant() + .toEpochMilli()); // Since file do not exist. + + // Verify toRemote and parseRemote. 
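+ // Round-trip check: the path parsed from the local file is serialized with getRemotePath() + // and parsed back with parseRemote(); validateAbstractBackupPath() then compares the fields that must survive the trip.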
+ String remotePath = abstractBackupPath.getRemotePath(); + logger.info(remotePath); + AbstractBackupPath abstractBackupPath2 = pathFactory.get(); + abstractBackupPath2.parseRemote(remotePath); + validateAbstractBackupPath(abstractBackupPath, abstractBackupPath2); + Assert.assertEquals(abstractBackupPath.getTime(), abstractBackupPath2.getTime()); + } + + @Test + public void testV1BackupPathsSnap() { + Path path = + Paths.get( + "target/data", + "keyspace1", + "columnfamily1", + "snapshot", + "201801011201", + "mc-1234-Data.db"); + AbstractBackupPath abstractBackupPath = pathFactory.get(); + abstractBackupPath.parseLocal(path.toFile(), BackupFileType.SNAP); + + // Verify parse local + Assert.assertEquals( + 0, abstractBackupPath.getLastModified().toEpochMilli()); // File does not exist. + Assert.assertEquals("keyspace1", abstractBackupPath.getKeyspace()); + Assert.assertEquals("columnfamily1", abstractBackupPath.getColumnFamily()); + Assert.assertEquals(BackupFileType.SNAP, abstractBackupPath.getType()); + Assert.assertEquals(path.toFile(), abstractBackupPath.getBackupFile()); + Assert.assertEquals( + "201801011201", DateUtil.formatyyyyMMddHHmm(abstractBackupPath.getTime())); + + // Verify toRemote and parseRemote. + String remotePath = abstractBackupPath.getRemotePath(); + logger.info(remotePath); + + AbstractBackupPath abstractBackupPath2 = pathFactory.get(); + abstractBackupPath2.parseRemote(remotePath); + validateAbstractBackupPath(abstractBackupPath, abstractBackupPath2); + Assert.assertEquals(abstractBackupPath.getTime(), abstractBackupPath2.getTime()); + } + + @Test + public void testV1BackupPathsMeta() { + Path path = Paths.get("target/data", "meta.json"); + AbstractBackupPath abstractBackupPath = pathFactory.get(); + abstractBackupPath.parseLocal(path.toFile(), BackupFileType.META); + + // Verify parse local + Assert.assertEquals( + 0, abstractBackupPath.getLastModified().toEpochMilli()); // File does not exist. + Assert.assertNull(abstractBackupPath.getKeyspace()); + Assert.assertNull(abstractBackupPath.getColumnFamily()); + Assert.assertEquals(BackupFileType.META, abstractBackupPath.getType()); + Assert.assertEquals(path.toFile(), abstractBackupPath.getBackupFile()); + + // Verify toRemote and parseRemote. + String remotePath = abstractBackupPath.getRemotePath(); + logger.info(remotePath); + + AbstractBackupPath abstractBackupPath2 = pathFactory.get(); + abstractBackupPath2.parseRemote(remotePath); + validateAbstractBackupPath(abstractBackupPath, abstractBackupPath2); + Assert.assertEquals(abstractBackupPath.getTime(), abstractBackupPath2.getTime()); + } + + @Test + public void testV2BackupPathSST() { + Path path = + Paths.get("target/data", "keyspace1", "columnfamily1", "backup", "mc-1234-Data.db"); + AbstractBackupPath abstractBackupPath = pathFactory.get(); + abstractBackupPath.parseLocal(path.toFile(), BackupFileType.SST_V2); + + // Verify parse local + Assert.assertEquals( + 0, abstractBackupPath.getLastModified().toEpochMilli()); // File does not exist. + Assert.assertEquals("keyspace1", abstractBackupPath.getKeyspace()); + Assert.assertEquals("columnfamily1", abstractBackupPath.getColumnFamily()); + Assert.assertEquals("SNAPPY", abstractBackupPath.getCompression().name()); + Assert.assertEquals(BackupFileType.SST_V2, abstractBackupPath.getType()); + Assert.assertEquals(path.toFile(), abstractBackupPath.getBackupFile()); + + // Verify toRemote and parseRemote. 
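+ // V2 remote paths carry the last-modified instant in the key at one-second granularity, + // so the assertion below truncates the expected epoch-millis value to whole seconds.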
+ Instant now = DateUtil.getInstant(); + abstractBackupPath.setLastModified(now); + String remotePath = abstractBackupPath.getRemotePath(); + logger.info(remotePath); + + AbstractBackupPath abstractBackupPath2 = pathFactory.get(); + abstractBackupPath2.parseRemote(remotePath); + Assert.assertEquals( + now.toEpochMilli() / 1_000L * 1_000L, + abstractBackupPath2.getLastModified().toEpochMilli()); + validateAbstractBackupPath(abstractBackupPath, abstractBackupPath2); + } + + @Test + public void testV2BackupPathMeta() { + Path path = Paths.get("target/data", "meta_v2_201801011201.json"); + AbstractBackupPath abstractBackupPath = pathFactory.get(); + abstractBackupPath.parseLocal(path.toFile(), BackupFileType.META_V2); + + // Verify parse local + Assert.assertEquals( + 0, abstractBackupPath.getLastModified().toEpochMilli()); // File does not exist. + Assert.assertNull(abstractBackupPath.getKeyspace()); + Assert.assertNull(abstractBackupPath.getColumnFamily()); + Assert.assertEquals(BackupFileType.META_V2, abstractBackupPath.getType()); + Assert.assertEquals(path.toFile(), abstractBackupPath.getBackupFile()); + Assert.assertEquals(CryptographyAlgorithm.PLAINTEXT, abstractBackupPath.getEncryption()); + + // Verify toRemote and parseRemote. + Instant now = DateUtil.getInstant(); + abstractBackupPath.setLastModified(now); + String remotePath = abstractBackupPath.getRemotePath(); + logger.info(remotePath); + + Assert.assertEquals("SNAPPY", abstractBackupPath.getCompression().name()); + + AbstractBackupPath abstractBackupPath2 = pathFactory.get(); + abstractBackupPath2.parseRemote(remotePath); + Assert.assertEquals( + now.toEpochMilli() / 1_000L * 1_000L, + abstractBackupPath2.getLastModified().toEpochMilli()); + validateAbstractBackupPath(abstractBackupPath, abstractBackupPath2); + } + + @Test + public void testV2BackupPathSecondaryIndex() { + Path path = + Paths.get( + "target/data", + "keyspace1", + "columnfamily1", + "backups", + ".columnfamily1_field1_idx", + "mc-1234-Data.db"); + AbstractBackupPath abstractBackupPath = pathFactory.get(); + abstractBackupPath.parseLocal(path.toFile(), BackupFileType.SECONDARY_INDEX_V2); + + // Verify parse local + Assert.assertEquals( + 0, abstractBackupPath.getLastModified().toEpochMilli()); // File does not exist. + Assert.assertEquals("keyspace1", abstractBackupPath.getKeyspace()); + Assert.assertEquals("columnfamily1", abstractBackupPath.getColumnFamily()); + Assert.assertEquals("SNAPPY", abstractBackupPath.getCompression().name()); + Assert.assertEquals(BackupFileType.SECONDARY_INDEX_V2, abstractBackupPath.getType()); + Assert.assertEquals(path.toFile(), abstractBackupPath.getBackupFile()); + + // Verify toRemote and parseRemote. 
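+ // The expected key asserted below spells out the V2 layout: base dir / cluster / token / + // file type / last-modified / keyspace / column family / secondary index directory / compression / encryption / file name.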
+ Instant now = DateUtil.getInstant(); + abstractBackupPath.setLastModified(now); + String remotePath = abstractBackupPath.getRemotePath(); + logger.info(remotePath); + long correctLastModified = +now.toEpochMilli() / 1_000L * 1_000L; + Assert.assertEquals( + "casstestbackup/1049_fake-app/1808575600/SECONDARY_INDEX_V2/" + + correctLastModified + + "/keyspace1/columnfamily1/.columnfamily1_field1_idx/SNAPPY/PLAINTEXT/mc-1234-Data.db", + remotePath); + + AbstractBackupPath abstractBackupPath2 = pathFactory.get(); + abstractBackupPath2.parseRemote(remotePath); + Assert.assertEquals( + correctLastModified, abstractBackupPath2.getLastModified().toEpochMilli()); + validateAbstractBackupPath(abstractBackupPath, abstractBackupPath2); + } + + @Test + public void testV2SnapshotPathSecondaryIndex() { + Path path = + Paths.get( + "target/data", + "keyspace1", + "columnfamily1", + "snapshots", + "snap_v2_19700101000", + ".columnfamily1_field1_idx", + "mc-1234-Data.db"); + AbstractBackupPath abstractBackupPath = pathFactory.get(); + abstractBackupPath.parseLocal(path.toFile(), BackupFileType.SECONDARY_INDEX_V2); + + // Verify parse local + Assert.assertEquals( + 0, abstractBackupPath.getLastModified().toEpochMilli()); // File does not exist. + Assert.assertEquals("keyspace1", abstractBackupPath.getKeyspace()); + Assert.assertEquals("columnfamily1", abstractBackupPath.getColumnFamily()); + Assert.assertEquals("SNAPPY", abstractBackupPath.getCompression().name()); + Assert.assertEquals(BackupFileType.SECONDARY_INDEX_V2, abstractBackupPath.getType()); + Assert.assertEquals(path.toFile(), abstractBackupPath.getBackupFile()); + + // Verify toRemote and parseRemote. + Instant now = DateUtil.getInstant(); + abstractBackupPath.setLastModified(now); + String remotePath = abstractBackupPath.getRemotePath(); + logger.info(remotePath); + long correctLastModified = +now.toEpochMilli() / 1_000L * 1_000L; + Assert.assertEquals( + "casstestbackup/1049_fake-app/1808575600/SECONDARY_INDEX_V2/" + + correctLastModified + + "/keyspace1/columnfamily1/.columnfamily1_field1_idx/SNAPPY/PLAINTEXT/mc-1234-Data.db", + remotePath); + + AbstractBackupPath abstractBackupPath2 = pathFactory.get(); + abstractBackupPath2.parseRemote(remotePath); + Assert.assertEquals( + correctLastModified, abstractBackupPath2.getLastModified().toEpochMilli()); + validateAbstractBackupPath(abstractBackupPath, abstractBackupPath2); + } + + @Test + public void testUnknownBackupFolder() { + Path path = + Paths.get( + "target/data", + "keyspace1", + "columnfamily1", + "foo", // foo is invalid + ".columnfamily1_field1_idx", + "mc-1234-Data.db"); + AbstractBackupPath abstractBackupPath = pathFactory.get(); + Assertions.assertThrows( + NullPointerException.class, + () -> + abstractBackupPath.parseLocal( + path.toFile(), BackupFileType.SECONDARY_INDEX_V2)); + } + + @Test + public void testRemoteV2Prefix() { + Path path = Paths.get("test_backup"); + AbstractBackupPath abstractBackupPath = pathFactory.get(); + Assert.assertEquals( + "casstestbackup/1049_fake-app/1808575600/META_V2", + abstractBackupPath.remoteV2Prefix(path, BackupFileType.META_V2).toString()); + + path = Paths.get("s3-bucket-name", "fake_base_dir", "-6717_random_fake_app"); + Assert.assertEquals( + "fake_base_dir/-6717_random_fake_app/1808575600/META_V2", + abstractBackupPath.remoteV2Prefix(path, BackupFileType.META_V2).toString()); + } + + private void validateAbstractBackupPath(AbstractBackupPath abp1, AbstractBackupPath abp2) { + Assert.assertEquals(abp1.getKeyspace(), abp2.getKeyspace()); + 
Assert.assertEquals(abp1.getColumnFamily(), abp2.getColumnFamily()); + Assert.assertEquals(abp1.getFileName(), abp2.getFileName()); + Assert.assertEquals(abp1.getType(), abp2.getType()); + Assert.assertEquals(abp1.getCompression(), abp2.getCompression()); + Assert.assertEquals(abp1.getEncryption(), abp2.getEncryption()); + } +} diff --git a/priam/src/test/java/com/netflix/priam/backup/BRTestModule.java b/priam/src/test/java/com/netflix/priam/backup/BRTestModule.java index 7c5e0ceb3..6a1425f49 100644 --- a/priam/src/test/java/com/netflix/priam/backup/BRTestModule.java +++ b/priam/src/test/java/com/netflix/priam/backup/BRTestModule.java @@ -22,7 +22,9 @@ import com.google.inject.name.Names; import com.netflix.priam.aws.auth.IS3Credential; import com.netflix.priam.aws.auth.S3RoleAssumptionCredential; -import com.netflix.priam.backup.identity.FakeInstanceEnvIdentity; +import com.netflix.priam.backupv2.IMetaProxy; +import com.netflix.priam.backupv2.MetaV1Proxy; +import com.netflix.priam.backupv2.MetaV2Proxy; import com.netflix.priam.config.FakeBackupRestoreConfig; import com.netflix.priam.config.FakeConfiguration; import com.netflix.priam.config.IBackupRestoreConfig; @@ -32,42 +34,62 @@ import com.netflix.priam.cryptography.pgp.PgpCryptography; import com.netflix.priam.defaultimpl.FakeCassandraProcess; import com.netflix.priam.defaultimpl.ICassandraProcess; -import com.netflix.priam.identity.*; +import com.netflix.priam.identity.FakeMembership; +import com.netflix.priam.identity.FakePriamInstanceFactory; +import com.netflix.priam.identity.IMembership; +import com.netflix.priam.identity.IPriamInstanceFactory; +import com.netflix.priam.identity.config.FakeInstanceInfo; +import com.netflix.priam.identity.config.InstanceInfo; import com.netflix.priam.restore.IPostRestoreHook; import com.netflix.priam.utils.FakeSleeper; import com.netflix.priam.utils.Sleeper; import com.netflix.spectator.api.DefaultRegistry; import com.netflix.spectator.api.Registry; +import java.time.Clock; +import java.time.Instant; +import java.time.ZoneId; +import java.util.Collections; import org.junit.Ignore; import org.quartz.SchedulerFactory; import org.quartz.impl.StdSchedulerFactory; -import java.util.Arrays; @Ignore -public class BRTestModule extends AbstractModule -{ - +public class BRTestModule extends AbstractModule { + @Override - protected void configure() - { - bind(IConfiguration.class).toInstance(new FakeConfiguration(FakeConfiguration.FAKE_REGION, "fake-app", "az1", "fakeInstance1")); + protected void configure() { + bind(IConfiguration.class).toInstance(new FakeConfiguration("fake-app")); bind(IBackupRestoreConfig.class).to(FakeBackupRestoreConfig.class); + bind(InstanceInfo.class) + .toInstance(new FakeInstanceInfo("fakeInstance1", "az1", "us-east-1")); + bind(IPriamInstanceFactory.class).to(FakePriamInstanceFactory.class); bind(SchedulerFactory.class).to(StdSchedulerFactory.class).in(Scopes.SINGLETON); - bind(IMembership.class).toInstance(new FakeMembership(Arrays.asList("fakeInstance1"))); + bind(IMembership.class) + .toInstance(new FakeMembership(Collections.singletonList("fakeInstance1"))); bind(ICredential.class).to(FakeNullCredential.class).in(Scopes.SINGLETON); - bind(IBackupFileSystem.class).annotatedWith(Names.named("backup")).to(FakeBackupFileSystem.class).in(Scopes.SINGLETON); - bind(IBackupFileSystem.class).annotatedWith(Names.named("incr_restore")).to(FakeBackupFileSystem.class).in(Scopes.SINGLETON); + bind(IBackupFileSystem.class) + .annotatedWith(Names.named("backup")) + 
.to(FakeBackupFileSystem.class) + .in(Scopes.SINGLETON); bind(Sleeper.class).to(FakeSleeper.class); - bind(IS3Credential.class).annotatedWith(Names.named("awss3roleassumption")).to(S3RoleAssumptionCredential.class); + bind(IS3Credential.class) + .annotatedWith(Names.named("awss3roleassumption")) + .to(S3RoleAssumptionCredential.class); - bind(IBackupFileSystem.class).annotatedWith(Names.named("encryptedbackup")).to(FakedS3EncryptedFileSystem.class); - bind(IFileCryptography.class).annotatedWith(Names.named("filecryptoalgorithm")).to(PgpCryptography.class); - bind(IIncrementalBackup.class).to(IncrementalBackup.class); - bind(InstanceEnvIdentity.class).to(FakeInstanceEnvIdentity.class); + bind(IBackupFileSystem.class) + .annotatedWith(Names.named("encryptedbackup")) + .to(NullBackupFileSystem.class); + bind(IFileCryptography.class) + .annotatedWith(Names.named("filecryptoalgorithm")) + .to(PgpCryptography.class); bind(ICassandraProcess.class).to(FakeCassandraProcess.class); bind(IPostRestoreHook.class).to(FakePostRestoreHook.class); bind(Registry.class).toInstance(new DefaultRegistry()); + bind(IMetaProxy.class).annotatedWith(Names.named("v1")).to(MetaV1Proxy.class); + bind(IMetaProxy.class).annotatedWith(Names.named("v2")).to(MetaV2Proxy.class); + bind(DynamicRateLimiter.class).to(FakeDynamicRateLimiter.class); + bind(Clock.class).toInstance(Clock.fixed(Instant.EPOCH, ZoneId.systemDefault())); } } diff --git a/priam/src/test/java/com/netflix/priam/backup/FakeBackupFileSystem.java b/priam/src/test/java/com/netflix/priam/backup/FakeBackupFileSystem.java index c653ad295..8807e4b4d 100644 --- a/priam/src/test/java/com/netflix/priam/backup/FakeBackupFileSystem.java +++ b/priam/src/test/java/com/netflix/priam/backup/FakeBackupFileSystem.java @@ -20,169 +20,157 @@ import com.google.inject.Inject; import com.google.inject.Provider; import com.google.inject.Singleton; -import com.netflix.priam.aws.S3BackupPath; -import com.netflix.priam.backup.AbstractBackupPath.BackupFileType; -import org.apache.commons.io.IOUtils; -import org.json.simple.JSONArray; - +import com.netflix.priam.aws.RemoteBackupPath; +import com.netflix.priam.config.IConfiguration; +import com.netflix.priam.merics.BackupMetrics; +import com.netflix.priam.notification.BackupNotificationMgr; +import java.io.File; import java.io.FileWriter; import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; +import java.nio.file.Path; +import java.time.Instant; import java.util.*; +import org.json.simple.JSONArray; @Singleton -public class FakeBackupFileSystem implements IBackupFileSystem -{ - private List flist; - public Set downloadedFiles; - public Set uploadedFiles; - public String baseDir, region, clusterName; +public class FakeBackupFileSystem extends AbstractFileSystem { + private List flist = new ArrayList<>(); + public Set downloadedFiles = new HashSet<>(); + public Set uploadedFiles = new HashSet<>(); + private String baseDir; + private String region; + private String clusterName; @Inject - Provider pathProvider; + public FakeBackupFileSystem( + IConfiguration configuration, + BackupMetrics backupMetrics, + BackupNotificationMgr backupNotificationMgr, + Provider pathProvider) { + super(configuration, backupMetrics, backupNotificationMgr, pathProvider); + } - public void setupTest(List files) - { + public void setupTest(List files) { clearTest(); - flist = new ArrayList(); - for (String file : files) - { - S3BackupPath path = pathProvider.get(); + for (String file : files) { + AbstractBackupPath path = 
pathProvider.get(); path.parseRemote(file); flist.add(path); } - downloadedFiles = new HashSet(); - uploadedFiles = new HashSet(); } - public void setupTest() - { - clearTest(); - flist = new ArrayList(); - downloadedFiles = new HashSet(); - uploadedFiles = new HashSet(); - } - - public void clearTest() - { - if (flist != null) - flist.clear(); - if (downloadedFiles != null) - downloadedFiles.clear(); + private void clearTest() { + flist.clear(); + downloadedFiles.clear(); + uploadedFiles.clear(); } - public void addFile(String file) - { - S3BackupPath path = pathProvider.get(); + public void addFile(String file) { + AbstractBackupPath path = pathProvider.get(); path.parseRemote(file); flist.add(path); } @SuppressWarnings("unchecked") @Override - public void download(AbstractBackupPath path, OutputStream os) throws BackupRestoreException - { - try - { - if (path.type == BackupFileType.META) - { - // List all files and generate the file - FileWriter fr = new FileWriter(path.newRestoreFile()); - try - { - JSONArray jsonObj = new JSONArray(); - for (AbstractBackupPath filePath : flist) - { - if (filePath.type == BackupFileType.SNAP) - jsonObj.add(filePath.getRemotePath()); - } - fr.write(jsonObj.toJSONString()); - } - finally - { - IOUtils.closeQuietly(fr); - } - } - downloadedFiles.add(path.getRemotePath()); - System.out.println("Downloading " + path.getRemotePath()); - } - catch (IOException io) - { - throw new BackupRestoreException(io.getMessage(), io); - } - } - - @Override - public void upload(AbstractBackupPath path, InputStream in) throws BackupRestoreException - { - uploadedFiles.add(path.backupFile.getAbsolutePath()); - } + public Iterator list(String bucket, Date start, Date till) { + String[] paths = bucket.split(String.valueOf(RemoteBackupPath.PATH_SEP)); - @Override - public Iterator list(String bucket, Date start, Date till) - { - String[] paths = bucket.split(String.valueOf(S3BackupPath.PATH_SEP)); - - if( paths.length > 1){ + if (paths.length > 1) { baseDir = paths[1]; region = paths[2]; clusterName = paths[3]; } - - List tmpList = new ArrayList(); - for (AbstractBackupPath path : flist) - { - - if ((path.time.after(start) && path.time.before(till)) || path.time.equals(start) - && path.baseDir.equals(baseDir) && path.clusterName.equals(clusterName) && path.region.equals(region)) - { - tmpList.add(path); + + List tmpList = new ArrayList<>(); + for (AbstractBackupPath path : flist) { + + if ((path.time.after(start) && path.time.before(till)) + || path.time.equals(start) + && path.baseDir.equals(baseDir) + && path.clusterName.equals(clusterName) + && path.region.equals(region)) { + tmpList.add(path); } } return tmpList.iterator(); } @Override - public int getActivecount() - { - // TODO Auto-generated method stub - return 0; + public Iterator listFileSystem(String prefix, String delimiter, String marker) { + ArrayList items = new ArrayList<>(); + flist.stream() + .forEach( + abstractBackupPath -> { + if (abstractBackupPath.getRemotePath().startsWith(prefix)) + items.add(abstractBackupPath.getRemotePath()); + }); + return items.iterator(); } - public void shutdown() - { - //nop + public void shutdown() { + // nop } @Override - public long getBytesUploaded() { + public long getFileSize(String remotePath) throws BackupRestoreException { return 0; } @Override - public long getAWSSlowDownExceptionCounter() { - return 0; + public boolean doesRemoteFileExist(Path remotePath) { + for (AbstractBackupPath abstractBackupPath : flist) { + if 
(abstractBackupPath.getRemotePath().equalsIgnoreCase(remotePath.toString())) + return true; + } + return false; } @Override - public Iterator listPrefixes(Date date) - { - // TODO Auto-generated method stub - return null; + public void deleteFiles(List remotePaths) throws BackupRestoreException { + remotePaths + .stream() + .forEach( + remotePath -> { + AbstractBackupPath path = pathProvider.get(); + path.parseRemote(remotePath.toString()); + flist.remove(path); + }); } @Override - public void cleanup() - { - // TODO Auto-generated method stub - + public void cleanup() { + clearTest(); } - @Override - public void download(AbstractBackupPath path, OutputStream os, - String diskPath) throws BackupRestoreException { - download(path, os); - } + @Override + protected void downloadFileImpl(AbstractBackupPath path, String suffix) + throws BackupRestoreException { + File localFile = new File(path.newRestoreFile().getAbsolutePath() + suffix); + if (path.getType() == AbstractBackupPath.BackupFileType.META) { + // List all files and generate the file + try (FileWriter fr = new FileWriter(localFile)) { + JSONArray jsonObj = new JSONArray(); + for (AbstractBackupPath filePath : flist) { + if (filePath.type == AbstractBackupPath.BackupFileType.SNAP + && filePath.time.equals(path.time)) { + jsonObj.add(filePath.getRemotePath()); + } + } + fr.write(jsonObj.toJSONString()); + fr.flush(); + } catch (IOException io) { + throw new BackupRestoreException(io.getMessage(), io); + } + } + downloadedFiles.add(path.getRemotePath()); + } + @Override + protected long uploadFileImpl(AbstractBackupPath path, Instant target) + throws BackupRestoreException { + uploadedFiles.add(path.getBackupFile().getAbsolutePath()); + addFile(path.getRemotePath()); + return path.getBackupFile().length(); + } } diff --git a/priam/src/test/java/com/netflix/priam/backup/FakeCredentials.java b/priam/src/test/java/com/netflix/priam/backup/FakeCredentials.java index f82c097a4..b73fe6e10 100644 --- a/priam/src/test/java/com/netflix/priam/backup/FakeCredentials.java +++ b/priam/src/test/java/com/netflix/priam/backup/FakeCredentials.java @@ -20,10 +20,9 @@ import com.amazonaws.auth.AWSCredentialsProvider; import com.netflix.priam.cred.ICredential; -public class FakeCredentials implements ICredential -{ - public AWSCredentialsProvider getAwsCredentialProvider() { - // TODO Auto-generated method stub - return null; - } +public class FakeCredentials implements ICredential { + public AWSCredentialsProvider getAwsCredentialProvider() { + // TODO Auto-generated method stub + return null; + } } diff --git a/priam/src/test/java/com/netflix/priam/backup/FakeDynamicRateLimiter.java b/priam/src/test/java/com/netflix/priam/backup/FakeDynamicRateLimiter.java new file mode 100644 index 000000000..2108d3567 --- /dev/null +++ b/priam/src/test/java/com/netflix/priam/backup/FakeDynamicRateLimiter.java @@ -0,0 +1,8 @@ +package com.netflix.priam.backup; + +import java.time.Instant; + +public class FakeDynamicRateLimiter implements DynamicRateLimiter { + @Override + public void acquire(AbstractBackupPath dir, Instant target, int tokens) {} +} diff --git a/priam/src/test/java/com/netflix/priam/backup/FakeNullCredential.java b/priam/src/test/java/com/netflix/priam/backup/FakeNullCredential.java index fa0419e2d..5e99fbd46 100644 --- a/priam/src/test/java/com/netflix/priam/backup/FakeNullCredential.java +++ b/priam/src/test/java/com/netflix/priam/backup/FakeNullCredential.java @@ -20,10 +20,9 @@ import com.amazonaws.auth.AWSCredentialsProvider; import 
com.netflix.priam.cred.ICredential; -public class FakeNullCredential implements ICredential -{ - public AWSCredentialsProvider getAwsCredentialProvider() { - // TODO Auto-generated method stub - return null; - } +class FakeNullCredential implements ICredential { + public AWSCredentialsProvider getAwsCredentialProvider() { + // TODO Auto-generated method stub + return null; + } } diff --git a/priam/src/test/java/com/netflix/priam/backup/FakePostRestoreHook.java b/priam/src/test/java/com/netflix/priam/backup/FakePostRestoreHook.java index ef23915ba..df2350392 100644 --- a/priam/src/test/java/com/netflix/priam/backup/FakePostRestoreHook.java +++ b/priam/src/test/java/com/netflix/priam/backup/FakePostRestoreHook.java @@ -18,12 +18,12 @@ import com.netflix.priam.restore.IPostRestoreHook; -public class FakePostRestoreHook implements IPostRestoreHook { +class FakePostRestoreHook implements IPostRestoreHook { public boolean hasValidParameters() { return true; } public void execute() { - //no op + // no op } } diff --git a/priam/src/test/java/com/netflix/priam/backup/FakedS3EncryptedFileSystem.java b/priam/src/test/java/com/netflix/priam/backup/FakedS3EncryptedFileSystem.java deleted file mode 100755 index 50ea79fb1..000000000 --- a/priam/src/test/java/com/netflix/priam/backup/FakedS3EncryptedFileSystem.java +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Copyright 2017 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package com.netflix.priam.backup; - -import java.io.InputStream; -import java.io.OutputStream; -import java.util.Date; -import java.util.Iterator; - -import com.google.inject.Inject; -import com.google.inject.Provider; -import com.google.inject.Singleton; -import com.google.inject.name.Named; -import com.netflix.priam.config.IConfiguration; -import com.netflix.priam.cred.ICredential; -import com.netflix.priam.compress.ICompression; -import com.netflix.priam.cryptography.IFileCryptography; - -@Singleton -public class FakedS3EncryptedFileSystem implements IBackupFileSystem { - - @Inject - public FakedS3EncryptedFileSystem( Provider pathProvider, ICompression compress, final IConfiguration config, ICredential cred - , @Named("filecryptoalgorithm") IFileCryptography fileCryptography - ) { - - } - - @Override - public void download(AbstractBackupPath path, OutputStream os) - throws BackupRestoreException { - // TODO Auto-generated method stub - - } - - @Override - public void download(AbstractBackupPath path, OutputStream os, - String filePath) throws BackupRestoreException { - // TODO Auto-generated method stub - - } - - @Override - public void upload(AbstractBackupPath path, InputStream in) - throws BackupRestoreException { - // TODO Auto-generated method stub - - } - - @Override - public Iterator list(String path, Date start, Date till) { - // TODO Auto-generated method stub - return null; - } - - @Override - public Iterator listPrefixes(Date date) { - // TODO Auto-generated method stub - return null; - } - - @Override - public void cleanup() { - // TODO Auto-generated method stub - - } - - @Override - public int getActivecount() { - // TODO Auto-generated method stub - return 0; - } - - @Override - public void shutdown() { - // TODO Auto-generated method stub - - } - - @Override - public long getBytesUploaded() { - return 0; - } - - @Override - public long getAWSSlowDownExceptionCounter() { - return 0; - } - -} diff --git a/priam/src/test/java/com/netflix/priam/backup/NullBackupFileSystem.java b/priam/src/test/java/com/netflix/priam/backup/NullBackupFileSystem.java index a177b24b2..6dbf3941f 100644 --- a/priam/src/test/java/com/netflix/priam/backup/NullBackupFileSystem.java +++ b/priam/src/test/java/com/netflix/priam/backup/NullBackupFileSystem.java @@ -17,72 +17,64 @@ package com.netflix.priam.backup; -import java.io.InputStream; -import java.io.OutputStream; -import java.util.Date; +import com.google.inject.Inject; +import com.google.inject.Provider; +import com.netflix.priam.config.IConfiguration; +import com.netflix.priam.merics.BackupMetrics; +import com.netflix.priam.notification.BackupNotificationMgr; +import java.nio.file.Path; +import java.time.Instant; +import java.util.Collections; import java.util.Iterator; +import java.util.List; -import com.netflix.priam.backup.AbstractBackupPath; -import com.netflix.priam.backup.BackupRestoreException; -import com.netflix.priam.backup.IBackupFileSystem; +public class NullBackupFileSystem extends AbstractFileSystem { -public class NullBackupFileSystem implements IBackupFileSystem -{ + @Inject + public NullBackupFileSystem( + IConfiguration configuration, + BackupMetrics backupMetrics, + BackupNotificationMgr backupNotificationMgr, + Provider pathProvider) { + super(configuration, backupMetrics, backupNotificationMgr, pathProvider); + } - @Override - public Iterator list(String bucket, Date start, Date till) - { - return null; + public void shutdown() { + // NOP } @Override - public int getActivecount() - { + public long 
getFileSize(String remotePath) throws BackupRestoreException { return 0; } - public void shutdown() - { - //NOP - } - @Override - public long getBytesUploaded() { - return 0; + public void deleteFiles(List remotePaths) throws BackupRestoreException { + // Do nothing. } @Override - public long getAWSSlowDownExceptionCounter() { - return 0; + public Iterator listFileSystem(String prefix, String delimiter, String marker) { + return Collections.emptyIterator(); } @Override - public void download(AbstractBackupPath path, OutputStream os) throws BackupRestoreException - { + public void cleanup() { + // TODO Auto-generated method stub } @Override - public void upload(AbstractBackupPath path, InputStream in) throws BackupRestoreException - { - } + protected void downloadFileImpl(AbstractBackupPath path, String suffix) + throws BackupRestoreException {} @Override - public Iterator listPrefixes(Date date) - { - return null; + protected boolean doesRemoteFileExist(Path remotePath) { + return false; } @Override - public void cleanup() - { - // TODO Auto-generated method stub - + protected long uploadFileImpl(AbstractBackupPath path, Instant target) + throws BackupRestoreException { + return 0; } - - @Override - public void download(AbstractBackupPath path, OutputStream os, - String filePath) throws BackupRestoreException { - // TODO Auto-generated method stub - - } -} \ No newline at end of file +} diff --git a/priam/src/test/java/com/netflix/priam/backup/TestAbstractFileSystem.java b/priam/src/test/java/com/netflix/priam/backup/TestAbstractFileSystem.java new file mode 100644 index 000000000..449a4a282 --- /dev/null +++ b/priam/src/test/java/com/netflix/priam/backup/TestAbstractFileSystem.java @@ -0,0 +1,336 @@ +/* + * Copyright 2018 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.priam.backup; + +import com.google.inject.Guice; +import com.google.inject.Inject; +import com.google.inject.Injector; +import com.google.inject.Provider; +import com.netflix.priam.config.IConfiguration; +import com.netflix.priam.merics.BackupMetrics; +import com.netflix.priam.notification.BackupNotificationMgr; +import com.netflix.priam.utils.BackupFileUtils; +import java.io.File; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.text.ParseException; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Random; +import java.util.concurrent.*; +import org.apache.commons.io.FileUtils; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +/** + * The goal of this class is to test the common functionality that is encapsulated in + * AbstractFileSystem. The actual upload/download of a file to a remote file system is beyond the + * scope of this class. Created by aagrawal on 9/22/18. 
+ */ +public class TestAbstractFileSystem { + private Injector injector; + private IConfiguration configuration; + private BackupMetrics backupMetrics; + private BackupNotificationMgr backupNotificationMgr; + private FailureFileSystem failureFileSystem; + private MyFileSystem myFileSystem; + + @Before + public void setBackupMetrics() { + if (injector == null) injector = Guice.createInjector(new BRTestModule()); + + if (configuration == null) configuration = injector.getInstance(IConfiguration.class); + + if (backupNotificationMgr == null) + backupNotificationMgr = injector.getInstance(BackupNotificationMgr.class); + + backupMetrics = injector.getInstance(BackupMetrics.class); + Provider pathProvider = injector.getProvider(AbstractBackupPath.class); + + if (failureFileSystem == null) + failureFileSystem = + new FailureFileSystem( + configuration, backupMetrics, backupNotificationMgr, pathProvider); + + if (myFileSystem == null) + myFileSystem = + new MyFileSystem( + configuration, backupMetrics, backupNotificationMgr, pathProvider); + + BackupFileUtils.cleanupDir(Paths.get(configuration.getDataFileLocation())); + } + + @Test + public void testFailedRetriesUpload() throws Exception { + try { + Collection files = generateFiles(1, 1, 1); + for (File file : files) { + failureFileSystem.uploadAndDelete(getDummyPath(file.toPath()), false /* async */); + } + } catch (BackupRestoreException e) { + // Verify the failure metric for upload is incremented. + Assert.assertEquals(1, (int) backupMetrics.getInvalidUploads().count()); + } + } + + private AbstractBackupPath getDummyPath() throws ParseException { + return getDummyPath(Paths.get(configuration.getDataFileLocation() + "/ks/cf/file-Data.db")); + } + + private AbstractBackupPath getDummyPath(Path localPath) throws ParseException { + AbstractBackupPath path = injector.getInstance(AbstractBackupPath.class); + path.parseLocal(localPath.toFile(), AbstractBackupPath.BackupFileType.SST_V2); + return path; + } + + private Collection generateFiles(int noOfKeyspaces, int noOfCf, int noOfSstables) + throws Exception { + Path dataDir = Paths.get(configuration.getDataFileLocation()); + BackupFileUtils.generateDummyFiles( + dataDir, noOfKeyspaces, noOfCf, noOfSstables, "snapshot", "201812310000", true); + String[] ext = {"db"}; + return FileUtils.listFiles(dataDir.toFile(), ext, true); + } + + @Test + public void testFailedRetriesDownload() throws Exception { + try { + failureFileSystem.downloadFile(getDummyPath(), "", 2); + } catch (BackupRestoreException e) { + // Verify the failure metric for download is incremented. + Assert.assertEquals(1, (int) backupMetrics.getInvalidDownloads().count()); + } + } + + @Test + public void testUpload() throws Exception { + File file = generateFiles(1, 1, 1).iterator().next(); + myFileSystem.uploadAndDelete(getDummyPath(file.toPath()), false /* async */); + Assert.assertEquals(1, (int) backupMetrics.getValidUploads().actualCount()); + Assert.assertFalse(file.exists()); + } + + @Test + public void testDownload() throws Exception { + // Dummy download + myFileSystem.downloadFile(getDummyPath(), "", 2); + // Verify the success metric for download is incremented. 
+ Assert.assertEquals(1, (int) backupMetrics.getValidDownloads().actualCount()); + } + + @Test + public void testAsyncUpload() throws Exception { + File file = generateFiles(1, 1, 1).iterator().next(); + myFileSystem + .uploadAndDelete(getDummyPath(file.toPath()), Instant.EPOCH, true /* async */) + .get(); + Assert.assertEquals(1, (int) backupMetrics.getValidUploads().actualCount()); + Assert.assertEquals(0, myFileSystem.getUploadTasksQueued()); + } + + @Test + public void testAsyncUploadBulk() throws Exception { + // Testing that the queue feature works. + // 1. Give a batch of dummy files to upload. Each file upload takes some random time to upload. + Collection files = generateFiles(1, 1, 20); + List<Future<?>> futures = new ArrayList<>(); + for (File file : files) { + futures.add( + myFileSystem.uploadAndDelete( + getDummyPath(file.toPath()), Instant.EPOCH, true /* async */)); + } + + // Verify all the work is finished. + for (Future future : futures) { + future.get(); + } + // 2. Success metric is incremented correctly + Assert.assertEquals(files.size(), (int) backupMetrics.getValidUploads().actualCount()); + + // 3. The task queue is empty after upload is finished. + Assert.assertEquals(0, myFileSystem.getUploadTasksQueued()); + } + + @Test + public void testUploadDedup() throws Exception { + // Testing that de-duping works. + Collection files = generateFiles(1, 1, 1); + File file = files.iterator().next(); + AbstractBackupPath abstractBackupPath = getDummyPath(file.toPath()); + // 1. Submit the same file for upload x times. Only one request will be entertained. + int size = 10; + ExecutorService threads = Executors.newFixedThreadPool(size); + List<Callable<Boolean>> torun = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + torun.add( + () -> { + myFileSystem.uploadAndDelete(abstractBackupPath, false /* async */); + return Boolean.TRUE; + }); + } + + // All tasks executed in different threads, at 'once'. + List<Future<Boolean>> futures = threads.invokeAll(torun); + + // No more need for the thread pool. + threads.shutdown(); + for (Future future : futures) { + try { + future.get(); + } catch (InterruptedException | ExecutionException e) { + // Do nothing. + } + } + // 2. Verify the success metric for upload is not the same as size, i.e. some amount of + // de-duping happened. + Assert.assertNotEquals(size, (int) backupMetrics.getValidUploads().actualCount()); + } + + @Test + public void testAsyncUploadFailure() throws Exception { + // Testing single async upload. + Collection files = generateFiles(1, 1, 1); + for (File file : files) { + Future future = + failureFileSystem.uploadAndDelete( + getDummyPath(file.toPath()), Instant.EPOCH, true /* async */); + try { + future.get(); + } catch (Exception e) { + // 1. Future.get() throws and surfaces the error message. + + // 2. Verify the failure metric for upload is incremented. + Assert.assertEquals(1, (int) backupMetrics.getInvalidUploads().count()); + + // 3. The task queue is empty after upload is finished. + Assert.assertEquals(0, failureFileSystem.getUploadTasksQueued()); + break; + } + } + } + + @Test + public void testAsyncDownload() throws Exception { + // Testing single async download. + Future future = myFileSystem.asyncDownloadFile(getDummyPath(), 2); + future.get(); + // 1. Verify the success metric for download is incremented. + Assert.assertEquals(1, (int) backupMetrics.getValidDownloads().actualCount()); + // 2. Verify the queue size is '0' after success. 
+ Assert.assertEquals(0, myFileSystem.getDownloadTasksQueued()); + } + + @Test + public void testAsyncDownloadBulk() throws Exception { + // Testing that the queue feature works. + // 1. Give 1000 dummy files to download. Each file download takes some random time to download. + int totalFiles = 1000; + List<Future<?>> futureList = new ArrayList<>(); + for (int i = 0; i < totalFiles; i++) + futureList.add(myFileSystem.asyncDownloadFile(getDummyPath(Paths.get("" + i)), 2)); + + // Ensure processing is finished. + for (Future future1 : futureList) { + future1.get(); + } + + // 2. Success metric is incremented correctly -> exactly 1000 times. + Assert.assertEquals(totalFiles, (int) backupMetrics.getValidDownloads().actualCount()); + + // 3. The task queue is empty after download is finished. + Assert.assertEquals(0, myFileSystem.getDownloadTasksQueued()); + } + + @Test + public void testAsyncDownloadFailure() throws Exception { + Future future = failureFileSystem.asyncDownloadFile(getDummyPath(), 2); + try { + future.get(); + } catch (Exception e) { + // Verify the failure metric for download is incremented. + Assert.assertEquals(1, (int) backupMetrics.getInvalidDownloads().count()); + } + } + + class FailureFileSystem extends NullBackupFileSystem { + + @Inject + public FailureFileSystem( + IConfiguration configuration, + BackupMetrics backupMetrics, + BackupNotificationMgr backupNotificationMgr, + Provider pathProvider) { + super(configuration, backupMetrics, backupNotificationMgr, pathProvider); + } + + @Override + protected void downloadFileImpl(AbstractBackupPath path, String suffix) + throws BackupRestoreException { + throw new BackupRestoreException( + "User injected failure file system error for testing download. Remote path: " + + path.getRemotePath()); + } + + @Override + protected long uploadFileImpl(AbstractBackupPath path, Instant target) + throws BackupRestoreException { + throw new BackupRestoreException( + "User injected failure file system error for testing upload. 
Local path: " + + path.getBackupFile().getAbsolutePath()); + } + } + + class MyFileSystem extends NullBackupFileSystem { + + private final Random random = new Random(); + + @Inject + public MyFileSystem( + IConfiguration configuration, + BackupMetrics backupMetrics, + BackupNotificationMgr backupNotificationMgr, + Provider pathProvider) { + super(configuration, backupMetrics, backupNotificationMgr, pathProvider); + } + + @Override + protected void downloadFileImpl(AbstractBackupPath path, String suffix) + throws BackupRestoreException { + try { + Thread.sleep(random.nextInt(20)); + } catch (InterruptedException e) { + e.printStackTrace(); + } + } + + @Override + protected long uploadFileImpl(AbstractBackupPath path, Instant target) + throws BackupRestoreException { + try { + Thread.sleep(random.nextInt(20)); + } catch (InterruptedException e) { + e.printStackTrace(); + } + + return 0; + } + } +} diff --git a/priam/src/test/java/com/netflix/priam/backup/TestBackup.java b/priam/src/test/java/com/netflix/priam/backup/TestBackup.java index e5e65a90a..276ace860 100644 --- a/priam/src/test/java/com/netflix/priam/backup/TestBackup.java +++ b/priam/src/test/java/com/netflix/priam/backup/TestBackup.java @@ -17,224 +17,244 @@ package com.netflix.priam.backup; +import com.google.common.collect.Iterators; +import com.google.inject.Guice; +import com.google.inject.Injector; +import com.google.inject.Key; +import com.google.inject.name.Names; import java.io.BufferedOutputStream; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; import java.util.HashSet; +import java.util.Iterator; import java.util.Set; - +import java.util.stream.Stream; import mockit.Mock; import mockit.MockUp; - import org.apache.cassandra.tools.NodeProbe; import org.apache.commons.io.FileUtils; import org.junit.*; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.inject.Guice; -import com.google.inject.Injector; -import com.google.inject.Key; -import com.google.inject.name.Names; - /** * Unit test case to test a snapshot backup and incremental backup - * + * * @author Praveen Sadhu - * */ -public class TestBackup -{ +public class TestBackup { private static Injector injector; private static FakeBackupFileSystem filesystem; private static final Logger logger = LoggerFactory.getLogger(TestBackup.class); - private static Set expectedFiles = new HashSet(); + private static final Set expectedFiles = new HashSet<>(); @BeforeClass - public static void setup() throws InterruptedException, IOException - { - new MockNodeProbe(); + public static void setup() throws InterruptedException, IOException { + new MockNodeProbe(); injector = Guice.createInjector(new BRTestModule()); - filesystem = (FakeBackupFileSystem) injector.getInstance(Key.get(IBackupFileSystem.class,Names.named("backup"))); + filesystem = + (FakeBackupFileSystem) + injector.getInstance( + Key.get(IBackupFileSystem.class, Names.named("backup"))); } - + @AfterClass - public static void cleanup() throws IOException - { + public static void cleanup() throws IOException { File file = new File("target/data"); FileUtils.deleteQuietly(file); } @Test - public void testSnapshotBackup() throws Exception - { - filesystem.setupTest(); + public void testSnapshotBackup() throws Exception { + filesystem.cleanup(); SnapshotBackup backup = injector.getInstance(SnapshotBackup.class); -// -// backup.execute(); -// Assert.assertEquals(3, 
filesystem.uploadedFiles.size()); -// System.out.println("***** "+filesystem.uploadedFiles.size()); -// boolean metafile = false; -// for (String filePath : expectedFiles) -// Assert.assertTrue(filesystem.uploadedFiles.contains(filePath)); -// -// for(String filepath : filesystem.uploadedFiles){ -// if( filepath.endsWith("meta.json")){ -// metafile = true; -// break; -// } -// } -// Assert.assertTrue(metafile); + // + // backup.execute(); + // Assert.assertEquals(3, filesystem.uploadedFiles.size()); + // System.out.println("***** "+filesystem.uploadedFiles.size()); + // boolean metafile = false; + // for (String filePath : expectedFiles) + // Assert.assertTrue(filesystem.uploadedFiles.contains(filePath)); + // + // for(String filepath : filesystem.uploadedFiles){ + // if( filepath.endsWith("meta.json")){ + // metafile = true; + // break; + // } + // } + // Assert.assertTrue(metafile); } @Test - public void testIncrementalBackup() throws Exception - { - filesystem.setupTest(); + public void testIncrementalBackup() throws Exception { + filesystem.cleanup(); generateIncrementalFiles(); IncrementalBackup backup = injector.getInstance(IncrementalBackup.class); backup.execute(); Assert.assertEquals(5, filesystem.uploadedFiles.size()); for (String filePath : expectedFiles) Assert.assertTrue(filesystem.uploadedFiles.contains(filePath)); + try (Stream entries = + Files.list(Paths.get("target/data/Keyspace1/Standard1/backups/"))) { + Assert.assertEquals(0, entries.count()); + } } @Test - public void testClusterSpecificColumnFamiliesSkippedBefore21() throws Exception - { - String[] columnFamilyDirs = {"schema_columns","local", "peers", "LocationInfo"}; + public void testIncrementalBackupOfSecondaryIndexes() throws Exception { + filesystem.cleanup(); + generateIncrementalFiles(); + IncrementalBackup backup = injector.getInstance(IncrementalBackup.class); + File secondaryIndexBackupDir = + new File("target/data/Keyspace1/Standard1/backups/.STANDARD1_field1_idx_1/"); + Assert.assertTrue(secondaryIndexBackupDir.exists()); + backup.execute(); + Iterator paths = + filesystem.listFileSystem("casstestbackup", "/", null /* marker */); + String path = + Iterators.find(paths, p -> p.endsWith("Keyspace1-Standard1-ia-4-Data.db"), null); + Assert.assertNotNull(path); + Assert.assertTrue( + path.contains(AbstractBackupPath.BackupFileType.SECONDARY_INDEX_V2.name())); + Assert.assertFalse(secondaryIndexBackupDir.exists()); + } + + @Test + public void testClusterSpecificColumnFamiliesSkippedBefore21() throws Exception { + String[] columnFamilyDirs = {"schema_columns", "local", "peers", "LocationInfo"}; testClusterSpecificColumnFamiliesSkipped(columnFamilyDirs); } @Test - public void testClusterSpecificColumnFamiliesSkippedFrom21() throws Exception - { - String[] columnFamilyDirs = {"schema_columns-296e9c049bec30c5828dc17d3df2132a", - "local-7ad54392bcdd45d684174c047860b347", - "peers-37c71aca7ac2383ba74672528af04d4f", - "LocationInfo-9f5c6374d48633299a0a5094bf9ad1e4"}; + public void testClusterSpecificColumnFamiliesSkippedFrom21() throws Exception { + String[] columnFamilyDirs = { + "schema_columns-296e9c049bec30c5828dc17d3df2132a", + "local-7ad54392bcdd45d684174c047860b347", + "peers-37c71aca7ac2383ba74672528af04d4f", + "LocationInfo-9f5c6374d48633299a0a5094bf9ad1e4" + }; testClusterSpecificColumnFamiliesSkipped(columnFamilyDirs); } - private void testClusterSpecificColumnFamiliesSkipped(String[] columnFamilyDirs) throws Exception - { - filesystem.setupTest(); + private void 
testClusterSpecificColumnFamiliesSkipped(String[] columnFamilyDirs) + throws Exception { + filesystem.cleanup(); File tmp = new File("target/data/"); - if (tmp.exists()) - cleanup(tmp); + if (tmp.exists()) cleanup(tmp); // Generate "data" generateIncrementalFiles(); - Set systemfiles = new HashSet(); + Set systemfiles = new HashSet<>(); // Generate system files - for (String columnFamilyDir: columnFamilyDirs) { + for (String columnFamilyDir : columnFamilyDirs) { String columnFamily = columnFamilyDir.split("-")[0]; - systemfiles.add(String.format("target/data/system/%s/backups/system-%s-ka-1-Data.db", columnFamilyDir, columnFamily)); - systemfiles.add(String.format("target/data/system/%s/backups/system-%s-ka-1-Index.db", columnFamilyDir, columnFamily)); + systemfiles.add( + String.format( + "target/data/system/%s/backups/system-%s-ka-1-Data.db", + columnFamilyDir, columnFamily)); + systemfiles.add( + String.format( + "target/data/system/%s/backups/system-%s-ka-1-Index.db", + columnFamilyDir, columnFamily)); } - for (String systemFilePath: systemfiles) - { + for (String systemFilePath : systemfiles) { File file = new File(systemFilePath); genTestFile(file); - //Not cluster specific columns should be backed up - if(systemFilePath.contains("schema_columns")) + // Not cluster specific columns should be backed up + if (systemFilePath.contains("schema_columns")) expectedFiles.add(file.getAbsolutePath()); } IncrementalBackup backup = injector.getInstance(IncrementalBackup.class); backup.execute(); - Assert.assertEquals(8, filesystem.uploadedFiles.size()); + Assert.assertEquals(7, filesystem.uploadedFiles.size()); for (String filePath : expectedFiles) Assert.assertTrue(filesystem.uploadedFiles.contains(filePath)); } - private static void generateIncrementalFiles() - { + private static void generateIncrementalFiles() { File tmp = new File("target/data/"); - if (tmp.exists()) - cleanup(tmp); + if (tmp.exists()) cleanup(tmp); // Setup - Set files = new HashSet(); + Set files = new HashSet<>(); files.add("target/data/Keyspace1/Standard1/backups/Keyspace1-Standard1-ia-1-Data.db"); files.add("target/data/Keyspace1/Standard1/backups/Keyspace1-Standard1-ia-1-Index.db"); files.add("target/data/Keyspace1/Standard1/backups/Keyspace1-Standard1-ia-2-Data.db"); files.add("target/data/Keyspace1/Standard1/backups/Keyspace1-Standard1-ia-3-Data.db"); + // purposely testing case mismatch in secondary index directory which can exist in practice + files.add( + "target/data/Keyspace1/Standard1/backups/.STANDARD1_field1_idx_1/Keyspace1-Standard1-ia-4-Data.db"); + File fileToSkip = + new File( + "target/data/Keyspace1/Standard1/backups/.foo/Keyspace1-Standard1-ia-5-Data.db"); + if (!fileToSkip.exists()) fileToSkip.mkdirs(); + File siDir = + new File( + "target/data/Keyspace1/Standard1/.STANDARD1_field1_idx_1/Keyspace1-Standard1-ia-5-Data.db"); + if (!siDir.exists()) siDir.mkdirs(); expectedFiles.clear(); - for (String filePath : files) - { + for (String filePath : files) { File file = new File(filePath); genTestFile(file); expectedFiles.add(file.getAbsolutePath()); } } - private static void genTestFile(File file) - { - try - { + private static void genTestFile(File file) { + try { File parent = file.getParentFile(); - if (!parent.exists()) - parent.mkdirs(); + if (!parent.exists()) parent.mkdirs(); BufferedOutputStream bos1 = new BufferedOutputStream(new FileOutputStream(file)); - for (long i = 0; i < (5L * 1024); i++) - bos1.write((byte) 8); + for (long i = 0; i < (5L * 1024); i++) bos1.write((byte) 8); bos1.flush(); 
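// Editor's note (not part of this patch): the genTestFile helper here writes
// 5 KB one byte at a time through a BufferedOutputStream. For comparison, a
// sketch of the same dummy-file generation with java.nio (hypothetical
// helper, shown only for illustration):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Arrays;

final class TestFileGenerator {
    // Fill `file` with 5 KB of the byte 0x08, creating parent directories first.
    static void genTestFile(Path file) throws IOException {
        Files.createDirectories(file.getParent());
        byte[] payload = new byte[5 * 1024];
        Arrays.fill(payload, (byte) 8);
        Files.write(file, payload);
    }
}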
bos1.close(); - } - catch (Exception e) - { + } catch (Exception e) { logger.error(e.getMessage()); } } - private static void cleanup(File dir) - { + private static void cleanup(File dir) { FileUtils.deleteQuietly(dir); } // Mock Nodeprobe class @Ignore - static class MockNodeProbe extends MockUp - { - @Mock - public void $init(String host, int port) throws IOException, InterruptedException - { - } + static class MockNodeProbe extends MockUp { @Mock - public void takeSnapshot(String snapshotName, String columnFamily, String... keyspaces) throws IOException - { + public void takeSnapshot(String snapshotName, String columnFamily, String... keyspaces) { File tmp = new File("target/data/"); - if (tmp.exists()) - cleanup(tmp); + if (tmp.exists()) cleanup(tmp); // Setup - Set files = new HashSet(); - files.add("target/data/Keyspace1/Standard1/snapshots/" + snapshotName + "/Keyspace1-Standard1-ia-5-Data.db"); - files.add("target/data/Keyspace1/Standard1/snapshots/201101081230/Keyspace1-Standard1-ia-6-Data.db"); - files.add("target/data/Keyspace1/Standard1/snapshots/" + snapshotName + "/Keyspace1-Standard1-ia-7-Data.db"); + Set files = new HashSet<>(); + files.add( + "target/data/Keyspace1/Standard1/snapshots/" + + snapshotName + + "/Keyspace1-Standard1-ia-5-Data.db"); + files.add( + "target/data/Keyspace1/Standard1/snapshots/201101081230/Keyspace1-Standard1-ia-6-Data.db"); + files.add( + "target/data/Keyspace1/Standard1/snapshots/" + + snapshotName + + "/Keyspace1-Standard1-ia-7-Data.db"); expectedFiles.clear(); - for (String filePath : files) - { + for (String filePath : files) { File file = new File(filePath); genTestFile(file); - if (filePath.indexOf("Keyspace1-Standard1-ia-6-Data.db") == -1)// skip - expectedFiles.add(file.getAbsolutePath()); + if (!filePath.contains("Keyspace1-Standard1-ia-6-Data.db")) // skip + expectedFiles.add(file.getAbsolutePath()); } } @Mock - public void close() throws IOException - { - } - - @Mock - public void clearSnapshot(String tag, String... keyspaces) throws IOException - { + public void clearSnapshot(String tag, String... 
keyspaces) { cleanup(new File("target/data")); } } - -} \ No newline at end of file +} diff --git a/priam/src/test/java/com/netflix/priam/backup/TestBackupDynamicRateLimiter.java b/priam/src/test/java/com/netflix/priam/backup/TestBackupDynamicRateLimiter.java new file mode 100644 index 000000000..cc754b58c --- /dev/null +++ b/priam/src/test/java/com/netflix/priam/backup/TestBackupDynamicRateLimiter.java @@ -0,0 +1,160 @@ +package com.netflix.priam.backup; + +import static org.junit.jupiter.api.Assertions.assertThrows; + +import com.google.common.base.Stopwatch; +import com.google.common.collect.ImmutableMap; +import com.google.common.truth.Truth; +import com.google.inject.Guice; +import com.google.inject.Injector; +import com.netflix.priam.aws.RemoteBackupPath; +import com.netflix.priam.config.FakeConfiguration; +import java.nio.file.Paths; +import java.time.Clock; +import java.time.Duration; +import java.time.Instant; +import java.time.ZoneId; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import org.junit.Before; +import org.junit.Test; + +public class TestBackupDynamicRateLimiter { + private static final Instant NOW = Instant.ofEpochMilli(1 << 16); + private static final Instant LATER = NOW.plusMillis(Duration.ofHours(1).toMillis()); + private static final int DIR_SIZE = 1 << 16; + + private BackupDynamicRateLimiter rateLimiter; + private FakeConfiguration config; + private Injector injector; + + @Before + public void setUp() { + injector = Guice.createInjector(new BRTestModule()); + config = injector.getInstance(FakeConfiguration.class); + } + + @Test + public void sunnyDay() { + rateLimiter = getRateLimiter(ImmutableMap.of("Priam.backup.threads", 1), NOW, DIR_SIZE); + Stopwatch timer = timePermitAcquisition(getBackupPath(), LATER, 21); + Truth.assertThat(timer.elapsed(TimeUnit.MILLISECONDS)).isAtLeast(1_000); + Truth.assertThat(timer.elapsed(TimeUnit.MILLISECONDS)).isAtMost(2_000); + } + + @Test + public void targetSetToEpoch() { + rateLimiter = getRateLimiter(ImmutableMap.of("Priam.backup.threads", 1), NOW, DIR_SIZE); + Stopwatch timer = timePermitAcquisition(getBackupPath(), Instant.EPOCH, 20); + assertNoRateLimiting(timer); + } + + @Test + public void pathIsNotASnapshot() { + rateLimiter = getRateLimiter(ImmutableMap.of("Priam.backup.threads", 1), NOW, DIR_SIZE); + AbstractBackupPath path = + getBackupPath( + "target/data/Keyspace1/Standard1/backups/Keyspace1-Standard1-ia-4-Data.db"); + Stopwatch timer = timePermitAcquisition(path, LATER, 20); + assertNoRateLimiting(timer); + } + + @Test + public void targetIsNow() { + rateLimiter = getRateLimiter(ImmutableMap.of("Priam.backup.threads", 1), NOW, DIR_SIZE); + Stopwatch timer = timePermitAcquisition(getBackupPath(), NOW, 20); + assertNoRateLimiting(timer); + } + + @Test + public void targetIsInThePast() { + rateLimiter = getRateLimiter(ImmutableMap.of("Priam.backup.threads", 1), NOW, DIR_SIZE); + Instant target = NOW.minus(Duration.ofHours(1L)); + Stopwatch timer = timePermitAcquisition(getBackupPath(), target, 20); + assertNoRateLimiting(timer); + } + + @Test + public void noBackupThreads() { + rateLimiter = getRateLimiter(ImmutableMap.of("Priam.backup.threads", 0), NOW, DIR_SIZE); + assertThrows( + IllegalStateException.class, + () -> timePermitAcquisition(getBackupPath(), LATER, 20)); + } + + @Test + public void negativeBackupThreads() { + rateLimiter = getRateLimiter(ImmutableMap.of("Priam.backup.threads", -1), NOW, DIR_SIZE); + assertThrows( + IllegalStateException.class, + () -> 
timePermitAcquisition(getBackupPath(), LATER, 20)); + } + + @Test + public void noData() { + rateLimiter = getRateLimiter(ImmutableMap.of("Priam.backup.threads", 1), NOW, 0); + Stopwatch timer = timePermitAcquisition(getBackupPath(), LATER, 20); + assertNoRateLimiting(timer); + } + + @Test + public void noPermitsRequested() { + rateLimiter = getRateLimiter(ImmutableMap.of("Priam.backup.threads", 1), NOW, DIR_SIZE); + assertThrows( + IllegalArgumentException.class, + () -> timePermitAcquisition(getBackupPath(), LATER, 0)); + } + + @Test + public void negativePermitsRequested() { + rateLimiter = getRateLimiter(ImmutableMap.of("Priam.backup.threads", 1), NOW, DIR_SIZE); + assertThrows( + IllegalArgumentException.class, + () -> timePermitAcquisition(getBackupPath(), LATER, -1)); + } + + private RemoteBackupPath getBackupPath() { + return getBackupPath( + "target/data/Keyspace1/Standard1/snapshots/snap_v2_202201010000/.STANDARD1_field1_idx_1/Keyspace1-Standard1-ia-4-Data.db"); + } + + private RemoteBackupPath getBackupPath(String filePath) { + RemoteBackupPath path = injector.getInstance(RemoteBackupPath.class); + path.parseLocal(Paths.get(filePath).toFile(), AbstractBackupPath.BackupFileType.SST_V2); + return path; + } + + private Stopwatch timePermitAcquisition(AbstractBackupPath path, Instant now, int permits) { + rateLimiter.acquire(path, now, permits); // Do this once first or else it won't throttle. + Stopwatch timer = Stopwatch.createStarted(); + rateLimiter.acquire(path, now, permits); + timer.stop(); + return timer; + } + + private BackupDynamicRateLimiter getRateLimiter( + Map properties, Instant now, long directorySize) { + properties.forEach(config::setFakeConfig); + return new BackupDynamicRateLimiter( + config, + Clock.fixed(now, ZoneId.systemDefault()), + new FakeDirectorySize(directorySize)); + } + + private void assertNoRateLimiting(Stopwatch timer) { + Truth.assertThat(timer.elapsed(TimeUnit.MILLISECONDS)).isAtMost(1); + } + + private static final class FakeDirectorySize implements DirectorySize { + private final long size; + + FakeDirectorySize(long size) { + this.size = size; + } + + @Override + public long getBytes(String location) { + return size; + } + } +} diff --git a/priam/src/test/java/com/netflix/priam/backup/TestBackupFile.java b/priam/src/test/java/com/netflix/priam/backup/TestBackupFile.java index 2fc5f1213..52478f3f6 100644 --- a/priam/src/test/java/com/netflix/priam/backup/TestBackupFile.java +++ b/priam/src/test/java/com/netflix/priam/backup/TestBackupFile.java @@ -17,111 +17,116 @@ package com.netflix.priam.backup; +import com.google.inject.Guice; +import com.google.inject.Injector; +import com.netflix.priam.aws.RemoteBackupPath; +import com.netflix.priam.backup.AbstractBackupPath.BackupFileType; +import com.netflix.priam.identity.InstanceIdentity; +import com.netflix.priam.utils.DateUtil; import java.io.BufferedOutputStream; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; import java.sql.Date; import java.text.ParseException; - import org.apache.commons.io.FileUtils; import org.junit.AfterClass; +import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Test; -import org.junit.Assert; - -import com.google.inject.Guice; -import com.google.inject.Injector; -import com.netflix.priam.config.FakeConfiguration; -import com.netflix.priam.aws.S3BackupPath; -import com.netflix.priam.backup.AbstractBackupPath.BackupFileType; -import com.netflix.priam.identity.InstanceIdentity; -public class TestBackupFile -{ +public class 
TestBackupFile { private static Injector injector; + private static String region; @BeforeClass - public static void setup() throws IOException - { + public static void setup() throws IOException { injector = Guice.createInjector(new BRTestModule()); - File file = new File("target/data/Keyspace1/Standard1/", "Keyspace1-Standard1-ia-5-Data.db"); - if (!file.exists()) - { + File file = + new File("target/data/Keyspace1/Standard1/", "Keyspace1-Standard1-ia-5-Data.db"); + if (!file.exists()) { File dir1 = new File("target/data/Keyspace1/Standard1/"); - if (!dir1.exists()) - dir1.mkdirs(); + if (!dir1.exists()) dir1.mkdirs(); byte b = 8; long oneKB = (1024L); System.out.println(oneKB); BufferedOutputStream bos1 = new BufferedOutputStream(new FileOutputStream(file)); - for (long i = 0; i < oneKB; i++) - { + for (long i = 0; i < oneKB; i++) { bos1.write(b); } bos1.flush(); bos1.close(); } InstanceIdentity factory = injector.getInstance(InstanceIdentity.class); - factory.getInstance().setToken("1234567");//Token + factory.getInstance().setToken("1234567"); // Token + region = factory.getInstanceInfo().getRegion(); } @AfterClass - public static void cleanup() throws IOException - { + public static void cleanup() throws IOException { File file = new File("Keyspace1-Standard1-ia-5-Data.db"); FileUtils.deleteQuietly(file); } @Test - public void testBackupFileCreation() throws ParseException - { + public void testBackupFileCreation() throws ParseException { // Test snapshot file - String snapshotfile = "target/data/Keyspace1/Standard1/snapshots/201108082320/Keyspace1-Standard1-ia-5-Data.db"; - S3BackupPath backupfile = injector.getInstance(S3BackupPath.class); + String snapshotfile = + "target/data/Keyspace1/Standard1/snapshots/201108082320/Keyspace1-Standard1-ia-5-Data.db"; + RemoteBackupPath backupfile = injector.getInstance(RemoteBackupPath.class); backupfile.parseLocal(new File(snapshotfile), BackupFileType.SNAP); Assert.assertEquals(BackupFileType.SNAP, backupfile.type); Assert.assertEquals("Keyspace1", backupfile.keyspace); Assert.assertEquals("Standard1", backupfile.columnFamily); Assert.assertEquals("1234567", backupfile.token); Assert.assertEquals("fake-app", backupfile.clusterName); - Assert.assertEquals(FakeConfiguration.FAKE_REGION, backupfile.region); + Assert.assertEquals(region, backupfile.region); Assert.assertEquals("casstestbackup", backupfile.baseDir); - Assert.assertEquals("casstestbackup/"+FakeConfiguration.FAKE_REGION+"/fake-app/1234567/201108082320/SNAP/Keyspace1/Standard1/Keyspace1-Standard1-ia-5-Data.db", backupfile.getRemotePath()); + Assert.assertEquals( + "casstestbackup/" + + region + + "/fake-app/1234567/201108082320/SNAP/Keyspace1/Standard1/Keyspace1-Standard1-ia-5-Data.db", + backupfile.getRemotePath()); } @Test - public void testIncBackupFileCreation() throws ParseException - { - // Test incremental file + public void testIncBackupFileCreation() throws ParseException { + // Test incremental file File bfile = new File("target/data/Keyspace1/Standard1/Keyspace1-Standard1-ia-5-Data.db"); - S3BackupPath backupfile = injector.getInstance(S3BackupPath.class); + RemoteBackupPath backupfile = injector.getInstance(RemoteBackupPath.class); backupfile.parseLocal(bfile, BackupFileType.SST); Assert.assertEquals(BackupFileType.SST, backupfile.type); Assert.assertEquals("Keyspace1", backupfile.keyspace); Assert.assertEquals("Standard1", backupfile.columnFamily); Assert.assertEquals("1234567", backupfile.token); Assert.assertEquals("fake-app", backupfile.clusterName); - 
Assert.assertEquals(FakeConfiguration.FAKE_REGION, backupfile.region); + Assert.assertEquals(region, backupfile.region); Assert.assertEquals("casstestbackup", backupfile.baseDir); - String datestr = AbstractBackupPath.formatDate(new Date(bfile.lastModified())); - Assert.assertEquals("casstestbackup/"+FakeConfiguration.FAKE_REGION+"/fake-app/1234567/" + datestr + "/SST/Keyspace1/Standard1/Keyspace1-Standard1-ia-5-Data.db", backupfile.getRemotePath()); + String datestr = DateUtil.formatyyyyMMddHHmm(new Date(bfile.lastModified())); + Assert.assertEquals( + "casstestbackup/" + + region + + "/fake-app/1234567/" + + datestr + + "/SST/Keyspace1/Standard1/Keyspace1-Standard1-ia-5-Data.db", + backupfile.getRemotePath()); } @Test - public void testMetaFileCreation() throws ParseException - { + public void testMetaFileCreation() throws ParseException { // Test snapshot file String filestr = "cass/data/1234567.meta"; File bfile = new File(filestr); - S3BackupPath backupfile = injector.getInstance(S3BackupPath.class); - backupfile.time = backupfile.parseDate("201108082320"); + RemoteBackupPath backupfile = injector.getInstance(RemoteBackupPath.class); backupfile.parseLocal(bfile, BackupFileType.META); + backupfile.setTime(DateUtil.getDate("201108082320")); Assert.assertEquals(BackupFileType.META, backupfile.type); Assert.assertEquals("1234567", backupfile.token); Assert.assertEquals("fake-app", backupfile.clusterName); - Assert.assertEquals(FakeConfiguration.FAKE_REGION, backupfile.region); + Assert.assertEquals(region, backupfile.region); Assert.assertEquals("casstestbackup", backupfile.baseDir); - Assert.assertEquals("casstestbackup/"+FakeConfiguration.FAKE_REGION+"/fake-app/1234567/201108082320/META/1234567.meta", backupfile.getRemotePath()); + Assert.assertEquals( + "casstestbackup/" + region + "/fake-app/1234567/201108082320/META/1234567.meta", + backupfile.getRemotePath()); } } diff --git a/priam/src/test/java/com/netflix/priam/backup/TestBackupHelperImpl.java b/priam/src/test/java/com/netflix/priam/backup/TestBackupHelperImpl.java new file mode 100644 index 000000000..224633efd --- /dev/null +++ b/priam/src/test/java/com/netflix/priam/backup/TestBackupHelperImpl.java @@ -0,0 +1,211 @@ +package com.netflix.priam.backup; + +import com.google.common.collect.ImmutableList; +import com.google.common.truth.Truth; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.inject.Guice; +import com.google.inject.Injector; +import com.google.inject.Provider; +import com.netflix.priam.compress.CompressionType; +import com.netflix.priam.config.BackupsToCompress; +import com.netflix.priam.config.FakeConfiguration; +import com.netflix.priam.config.IConfiguration; +import java.io.File; +import java.io.IOException; +import java.nio.file.Paths; +import java.time.Instant; +import java.time.temporal.ChronoUnit; +import java.util.Arrays; +import java.util.Collection; +import java.util.Objects; +import org.apache.commons.io.FileUtils; +import org.junit.*; +import org.junit.experimental.runners.Enclosed; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +@RunWith(Enclosed.class) +public class TestBackupHelperImpl { + private static final String COMPRESSED_DATA = "compressed-1234-Data.db"; + private static final String COMPRESSION_INFO = "compressed-1234-CompressionInfo.db"; + private static final String UNCOMPRESSED_DATA = "uncompressed-1234-Data.db"; + private static final String RANDOM_DATA = "random-1234-Data.db"; + private static final String RANDOM_COMPONENT = 
"random-1234-compressioninfo.db"; + private static final ImmutableList TABLE_PARTS = + ImmutableList.of( + COMPRESSED_DATA, + COMPRESSION_INFO, + UNCOMPRESSED_DATA, + RANDOM_DATA, + RANDOM_COMPONENT); + + private static final String DIRECTORY = "target/data/ks/cf/backup/"; + + @RunWith(Parameterized.class) + public static class ParameterizedTests { + private final BackupHelperImpl backupHelper; + private final String tablePart; + private final CompressionType compressionAlgorithm; + + @BeforeClass + public static void setUp() throws IOException { + FileUtils.forceMkdir(new File(DIRECTORY)); + } + + @Before + public void createFiles() throws IOException { + for (String tablePart : TABLE_PARTS) { + File file = Paths.get(DIRECTORY, tablePart).toFile(); + if (file.createNewFile()) { + FileUtils.forceDeleteOnExit(file); + } else { + throw new IllegalStateException("failed to create " + tablePart); + } + } + } + + @AfterClass + public static void tearDown() throws IOException { + FileUtils.deleteDirectory(new File(DIRECTORY)); + } + + @Parameterized.Parameters + public static Collection data() { + return Arrays.asList( + new Object[][] { + {BackupsToCompress.NONE, COMPRESSED_DATA, CompressionType.NONE}, + {BackupsToCompress.NONE, COMPRESSION_INFO, CompressionType.NONE}, + {BackupsToCompress.NONE, UNCOMPRESSED_DATA, CompressionType.NONE}, + {BackupsToCompress.NONE, RANDOM_DATA, CompressionType.NONE}, + {BackupsToCompress.NONE, RANDOM_COMPONENT, CompressionType.NONE}, + {BackupsToCompress.ALL, COMPRESSED_DATA, CompressionType.SNAPPY}, + {BackupsToCompress.ALL, COMPRESSION_INFO, CompressionType.SNAPPY}, + {BackupsToCompress.ALL, UNCOMPRESSED_DATA, CompressionType.SNAPPY}, + {BackupsToCompress.ALL, RANDOM_DATA, CompressionType.SNAPPY}, + {BackupsToCompress.ALL, RANDOM_COMPONENT, CompressionType.SNAPPY}, + {BackupsToCompress.IF_REQUIRED, COMPRESSED_DATA, CompressionType.NONE}, + {BackupsToCompress.IF_REQUIRED, COMPRESSION_INFO, CompressionType.NONE}, + {BackupsToCompress.IF_REQUIRED, UNCOMPRESSED_DATA, CompressionType.SNAPPY}, + {BackupsToCompress.IF_REQUIRED, RANDOM_DATA, CompressionType.SNAPPY}, + {BackupsToCompress.IF_REQUIRED, RANDOM_COMPONENT, CompressionType.SNAPPY}, + }); + } + + public ParameterizedTests(BackupsToCompress which, String tablePart, CompressionType algo) { + this.tablePart = tablePart; + this.compressionAlgorithm = algo; + Injector injector = Guice.createInjector(new BRTestModule()); + FakeConfiguration fakeConfiguration = + (FakeConfiguration) injector.getInstance(IConfiguration.class); + fakeConfiguration.setFakeConfig("Priam.backupsToCompress", which); + IFileSystemContext context = injector.getInstance(IFileSystemContext.class); + Provider pathFactory = + injector.getProvider(AbstractBackupPath.class); + backupHelper = new BackupHelperImpl(fakeConfiguration, context, pathFactory); + } + + @Test + public void testCorrectCompressionType() throws Exception { + File parent = new File(DIRECTORY); + AbstractBackupPath.BackupFileType backupFileType = + AbstractBackupPath.BackupFileType.SST_V2; + ImmutableList> futures = + backupHelper.uploadAndDeleteAllFiles(parent, backupFileType, false); + AbstractBackupPath abstractBackupPath = null; + for (ListenableFuture future : futures) { + if (future.get().getFileName().equals(tablePart)) { + abstractBackupPath = future.get(); + break; + } + } + Truth.assertThat(Objects.requireNonNull(abstractBackupPath).getCompression()) + .isEqualTo(compressionAlgorithm); + } + } + + public static class ProgrammaticTests { + private final 
BackupHelperImpl backupHelper; + private final FakeConfiguration config; + + @BeforeClass + public static void setUp() throws IOException { + FileUtils.forceMkdir(new File(DIRECTORY)); + for (String tablePart : TABLE_PARTS) { + File file = Paths.get(DIRECTORY, tablePart).toFile(); + if (file.createNewFile()) { + FileUtils.forceDeleteOnExit(file); + } else { + throw new IllegalStateException("failed to create " + tablePart); + } + } + } + + @AfterClass + public static void tearDown() throws IOException { + FileUtils.deleteDirectory(new File(DIRECTORY)); + } + + public ProgrammaticTests() { + Injector injector = Guice.createInjector(new BRTestModule()); + config = (FakeConfiguration) injector.getInstance(IConfiguration.class); + IFileSystemContext context = injector.getInstance(IFileSystemContext.class); + Provider pathFactory = + injector.getProvider(AbstractBackupPath.class); + backupHelper = new BackupHelperImpl(config, context, pathFactory); + } + + @Test + public void testDataFilesAreLast() throws IOException { + AbstractBackupPath.BackupFileType fileType = AbstractBackupPath.BackupFileType.SST_V2; + boolean dataFilesAreLast = + backupHelper + .getBackupPaths(new File(DIRECTORY), fileType) + .asList() + .stream() + .skip(2) + .allMatch(p -> p.getBackupFile().getName().endsWith("-Data.db")); + Truth.assertThat(dataFilesAreLast).isTrue(); + } + + @Test + public void testNonDataFilesComeFirst() throws IOException { + AbstractBackupPath.BackupFileType fileType = AbstractBackupPath.BackupFileType.SST_V2; + boolean nonDataFilesComeFirst = + backupHelper + .getBackupPaths(new File(DIRECTORY), fileType) + .asList() + .stream() + .limit(2) + .noneMatch(p -> p.getBackupFile().getName().endsWith("-Data.db")); + Truth.assertThat(nonDataFilesComeFirst).isTrue(); + } + + @Test + public void testNeverCompressedOldFilesAreCompressed() throws IOException { + AbstractBackupPath.BackupFileType fileType = AbstractBackupPath.BackupFileType.SST_V2; + long transitionInstant = Instant.now().plus(1, ChronoUnit.DAYS).toEpochMilli(); + config.setCompressionTransitionEpochMillis(transitionInstant); + config.setFakeConfig("Priam.backupsToCompress", BackupsToCompress.NONE); + boolean backupsAreCompressed = + backupHelper + .getBackupPaths(new File(DIRECTORY), fileType) + .stream() + .allMatch(p -> p.getCompression() == CompressionType.SNAPPY); + Truth.assertThat(backupsAreCompressed).isTrue(); + } + + @Test + public void testOptionallyCompressedOldFilesAreCompressed() throws IOException { + AbstractBackupPath.BackupFileType fileType = AbstractBackupPath.BackupFileType.SST_V2; + long transitionInstant = Instant.now().plus(1, ChronoUnit.DAYS).toEpochMilli(); + config.setCompressionTransitionEpochMillis(transitionInstant); + config.setFakeConfig("Priam.backupsToCompress", BackupsToCompress.IF_REQUIRED); + boolean backupsAreCompressed = + backupHelper + .getBackupPaths(new File(DIRECTORY), fileType) + .stream() + .allMatch(p -> p.getCompression() == CompressionType.SNAPPY); + Truth.assertThat(backupsAreCompressed).isTrue(); + } + } +} diff --git a/priam/src/test/java/com/netflix/priam/backup/TestBackupService.java b/priam/src/test/java/com/netflix/priam/backup/TestBackupService.java new file mode 100644 index 000000000..ed4a34978 --- /dev/null +++ b/priam/src/test/java/com/netflix/priam/backup/TestBackupService.java @@ -0,0 +1,183 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.priam.backup; + +import com.google.inject.Guice; +import com.google.inject.Injector; +import com.netflix.priam.config.IBackupRestoreConfig; +import com.netflix.priam.config.IConfiguration; +import com.netflix.priam.connection.JMXNodeTool; +import com.netflix.priam.defaultimpl.IService; +import com.netflix.priam.scheduler.PriamScheduler; +import com.netflix.priam.tuner.CassandraTunerService; +import com.netflix.priam.tuner.TuneCassandra; +import com.netflix.priam.utils.BackupFileUtils; +import com.netflix.priam.utils.DateUtil; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.time.Instant; +import java.util.Set; +import mockit.Expectations; +import mockit.Mocked; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.quartz.SchedulerException; + +/** Created by aagrawal on 3/10/19. */ +public class TestBackupService { + private final PriamScheduler scheduler; + private final CassandraTunerService cassandraTunerService; + + public TestBackupService() { + Injector injector = Guice.createInjector(new BRTestModule()); + this.scheduler = injector.getInstance(PriamScheduler.class); + this.cassandraTunerService = injector.getInstance(CassandraTunerService.class); + } + + @Before + public void cleanup() throws SchedulerException { + scheduler.getScheduler().clear(); + } + + @Test + public void testBackupDisabled( + @Mocked IConfiguration configuration, @Mocked IBackupRestoreConfig backupRestoreConfig) + throws Exception { + new Expectations() { + { + configuration.getBackupCronExpression(); + result = "-1"; + configuration.getDataFileLocation(); + result = "target/data"; + } + }; + + Path dummyDataDirectoryLocation = Paths.get(configuration.getDataFileLocation()); + Instant snapshotInstant = DateUtil.getInstant(); + + // Create one V1 snapshot. + String snapshotV1Name = DateUtil.formatInstant(DateUtil.yyyyMMdd, snapshotInstant); + BackupFileUtils.generateDummyFiles( + dummyDataDirectoryLocation, + 2, + 3, + 3, + AbstractBackup.SNAPSHOT_FOLDER, + snapshotV1Name, + true); + + String snapshotName = "meta_v2_" + snapshotV1Name; + // Create one V2 snapshot. + BackupFileUtils.generateDummyFiles( + dummyDataDirectoryLocation, + 2, + 3, + 3, + AbstractBackup.SNAPSHOT_FOLDER, + snapshotName, + false); + + IService backupService = + new BackupService( + configuration, backupRestoreConfig, scheduler, cassandraTunerService); + backupService.scheduleService(); + Assert.assertEquals(0, scheduler.getScheduler().getJobKeys(null).size()); + + // snapshot V1 name should not be there. 
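// Editor's note (not part of this patch): testBackupDisabled relies on the
// convention that a backup cron expression of "-1" disables the job entirely,
// while any other value must parse as a Quartz cron expression. A sketch of
// that convention (hypothetical helper, not Priam's scheduling code):

import java.text.ParseException;
import java.util.Optional;
import org.quartz.CronExpression;

final class CronConfig {
    // Empty means the job is disabled; otherwise the parsed cron schedule.
    static Optional<CronExpression> parse(String value) throws ParseException {
        return "-1".equals(value)
                ? Optional.empty()
                : Optional.of(new CronExpression(value));
    }
}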
+ Set backupPaths = + AbstractBackup.getBackupDirectories(configuration, AbstractBackup.SNAPSHOT_FOLDER); + for (Path backupPath : backupPaths) { + Assert.assertTrue(Files.exists(Paths.get(backupPath.toString(), snapshotName))); + Assert.assertFalse(Files.exists(Paths.get(backupPath.toString(), snapshotV1Name))); + } + } + + @Test + public void testBackupEnabled( + @Mocked IConfiguration configuration, @Mocked IBackupRestoreConfig backupRestoreConfig) + throws Exception { + new Expectations() { + { + configuration.getBackupCronExpression(); + result = "0 0/1 * 1/1 * ? *"; + configuration.isIncrementalBackupEnabled(); + result = false; + } + }; + IService backupService = + new BackupService( + configuration, backupRestoreConfig, scheduler, cassandraTunerService); + backupService.scheduleService(); + Assert.assertEquals(2, scheduler.getScheduler().getJobKeys(null).size()); + } + + @Test + public void testBackupEnabledWithIncremental( + @Mocked IConfiguration configuration, @Mocked IBackupRestoreConfig backupRestoreConfig) + throws Exception { + new Expectations() { + { + configuration.getBackupCronExpression(); + result = "0 0/1 * 1/1 * ? *"; + configuration.isIncrementalBackupEnabled(); + result = true; + } + }; + IService backupService = + new BackupService( + configuration, backupRestoreConfig, scheduler, cassandraTunerService); + backupService.scheduleService(); + Assert.assertEquals(3, scheduler.getScheduler().getJobKeys(null).size()); + } + + @Test + public void updateService( + @Mocked IConfiguration configuration, + @Mocked IBackupRestoreConfig backupRestoreConfig, + @Mocked JMXNodeTool nodeTool, + @Mocked TuneCassandra tuneCassandra) + throws Exception { + new Expectations() { + { + configuration.getBackupCronExpression(); + result = "0 0/1 * 1/1 * ? *"; + result = "0 0/1 * 1/1 * ? *"; + result = "-1"; + result = "-1"; + configuration.isIncrementalBackupEnabled(); + result = true; + backupRestoreConfig.enableV2Backups(); + result = true; + backupRestoreConfig.getSnapshotMetaServiceCronExpression(); + result = "0 0/1 * 1/1 * ? *"; + } + }; + IService backupService = + new BackupService( + configuration, backupRestoreConfig, scheduler, cassandraTunerService); + backupService.scheduleService(); + Assert.assertEquals(3, scheduler.getScheduler().getJobKeys(null).size()); + + System.out.println("After updated"); + backupService.onChangeUpdateService(); + System.out.println(scheduler.getScheduler().getJobKeys(null)); + Assert.assertEquals(2, scheduler.getScheduler().getJobKeys(null).size()); + } +} diff --git a/priam/src/test/java/com/netflix/priam/backup/TestBackupStatusMgr.java b/priam/src/test/java/com/netflix/priam/backup/TestBackupStatusMgr.java new file mode 100644 index 000000000..b7635a59a --- /dev/null +++ b/priam/src/test/java/com/netflix/priam/backup/TestBackupStatusMgr.java @@ -0,0 +1,252 @@ +/* + * Copyright 2017 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.priam.backup; + +import com.google.inject.Guice; +import com.google.inject.Injector; +import com.netflix.priam.config.IConfiguration; +import com.netflix.priam.utils.DateUtil; +import com.netflix.priam.utils.DateUtil.DateRange; +import java.io.File; +import java.time.Instant; +import java.time.temporal.ChronoUnit; +import java.util.Calendar; +import java.util.Date; +import java.util.List; +import java.util.Optional; +import org.apache.commons.io.FileUtils; +import org.joda.time.DateTime; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** Created by aagrawal on 7/11/17. */ +public class TestBackupStatusMgr { + private static final Logger logger = LoggerFactory.getLogger(TestBackupStatusMgr.class); + private static IConfiguration configuration; + private static IBackupStatusMgr backupStatusMgr; + private final String backupDate = "201812011000"; + + @BeforeClass + public static void setup() { + Injector injector = Guice.createInjector(new BRTestModule()); + // cleanup old saved file, if any + configuration = injector.getInstance(IConfiguration.class); + backupStatusMgr = injector.getInstance(IBackupStatusMgr.class); + } + + @Before + @After + public void cleanup() { + FileUtils.deleteQuietly(new File(configuration.getBackupStatusFileLoc())); + } + + private void prepare() throws Exception { + cleanup(); + Instant start = DateUtil.parseInstant(backupDate); + backupStatusMgr.finish(getBackupMetaData(start, Status.FINISHED)); + backupStatusMgr.failed(getBackupMetaData(start.plus(2, ChronoUnit.HOURS), Status.FAILED)); + backupStatusMgr.finish(getBackupMetaData(start.plus(4, ChronoUnit.HOURS), Status.FINISHED)); + backupStatusMgr.failed(getBackupMetaData(start.plus(6, ChronoUnit.HOURS), Status.FAILED)); + backupStatusMgr.failed(getBackupMetaData(start.plus(8, ChronoUnit.HOURS), Status.FAILED)); + backupStatusMgr.finish(getBackupMetaData(start.plus(1, ChronoUnit.DAYS), Status.FINISHED)); + backupStatusMgr.finish(getBackupMetaData(start.plus(2, ChronoUnit.DAYS), Status.FINISHED)); + } + + private BackupMetadata getBackupMetaData(Instant startTime, Status status) throws Exception { + BackupMetadata backupMetadata = + new BackupMetadata( + BackupVersion.SNAPSHOT_BACKUP, "123", new Date(startTime.toEpochMilli())); + backupMetadata.setCompleted( + new Date(startTime.plus(30, ChronoUnit.MINUTES).toEpochMilli())); + backupMetadata.setStatus(status); + backupMetadata.setSnapshotLocation("file.txt"); + return backupMetadata; + } + + @Test + public void testSnapshotUpdateMethod() throws Exception { + Date startTime = DateUtil.getDate("198407110720"); + BackupMetadata backupMetadata = + new BackupMetadata(BackupVersion.SNAPSHOT_BACKUP, "123", startTime); + backupStatusMgr.start(backupMetadata); + Optional backupMetadata1 = + backupStatusMgr.locate(startTime).stream().findFirst(); + Assert.assertNull(backupMetadata1.get().getLastValidated()); + backupMetadata.setLastValidated(Calendar.getInstance().getTime()); + backupMetadata.setCassandraSnapshotSuccess(true); + backupMetadata.setSnapshotLocation("random"); + backupStatusMgr.update(backupMetadata); + backupMetadata1 = backupStatusMgr.locate(startTime).stream().findFirst(); + Assert.assertNotNull(backupMetadata1.get().getLastValidated()); + Assert.assertTrue(backupMetadata1.get().isCassandraSnapshotSuccess()); + Assert.assertEquals("random", 
backupMetadata1.get().getSnapshotLocation()); + } + + @Test + public void testSnapshotStatusAddFinish() throws Exception { + Date startTime = DateUtil.getDate("198407110720"); + + BackupMetadata backupMetadata = + new BackupMetadata(BackupVersion.SNAPSHOT_BACKUP, "123", startTime); + backupStatusMgr.start(backupMetadata); + List metadataList = backupStatusMgr.locate(startTime); + Assert.assertNotNull(metadataList); + Assert.assertTrue(!metadataList.isEmpty()); + Assert.assertEquals(1, metadataList.size()); + Assert.assertEquals(startTime, metadataList.get(0).getStart()); + logger.info("Snapshot start: {}", metadataList.get(0)); + + backupStatusMgr.finish(backupMetadata); + metadataList = backupStatusMgr.locate(startTime); + Assert.assertNotNull(metadataList); + Assert.assertTrue(!metadataList.isEmpty()); + Assert.assertEquals(1, metadataList.size()); + Assert.assertEquals(Status.FINISHED, metadataList.get(0).getStatus()); + Assert.assertTrue(metadataList.get(0).getCompleted() != null); + logger.info("Snapshot finished: {}", metadataList.get(0)); + } + + @Test + public void testSnapshotStatusAddFailed() throws Exception { + Date startTime = DateUtil.getDate("198407120720"); + + BackupMetadata backupMetadata = + new BackupMetadata(BackupVersion.SNAPSHOT_BACKUP, "123", startTime); + backupStatusMgr.start(backupMetadata); + List metadataList = backupStatusMgr.locate(startTime); + Assert.assertNotNull(metadataList); + Assert.assertTrue(!metadataList.isEmpty()); + Assert.assertEquals(1, metadataList.size()); + Assert.assertEquals(startTime, metadataList.get(0).getStart()); + logger.info("Snapshot start: {}", metadataList.get(0)); + + backupStatusMgr.failed(backupMetadata); + metadataList = backupStatusMgr.locate(startTime); + Assert.assertNotNull(metadataList); + Assert.assertTrue(!metadataList.isEmpty()); + Assert.assertEquals(1, metadataList.size()); + Assert.assertEquals(Status.FAILED, metadataList.get(0).getStatus()); + Assert.assertTrue(metadataList.get(0).getCompleted() != null); + logger.info("Snapshot failed: {}", metadataList.get(0)); + } + + @Test + public void testSnapshotStatusMultiAddFinishInADay() throws Exception { + final int noOfEntries = 10; + Date startTime = DateUtil.getDate("19840101"); + + for (int i = 0; i < noOfEntries; i++) { + assert startTime != null; + Date time = new DateTime(startTime.getTime()).plusHours(i).toDate(); + BackupMetadata backupMetadata = + new BackupMetadata(BackupVersion.SNAPSHOT_BACKUP, "123", time); + backupStatusMgr.start(backupMetadata); + backupStatusMgr.finish(backupMetadata); + } + + List metadataList = backupStatusMgr.locate(startTime); + Assert.assertEquals(noOfEntries, metadataList.size()); + logger.info(metadataList.toString()); + + // Ensure that list is always maintained from latest to eldest + Date latest = null; + for (BackupMetadata backupMetadata : metadataList) { + if (latest == null) latest = backupMetadata.getStart(); + else { + Assert.assertTrue(backupMetadata.getStart().before(latest)); + latest = backupMetadata.getStart(); + } + } + } + + @Test + public void testSnapshotStatusSize() throws Exception { + final int noOfEntries = backupStatusMgr.getCapacity() + 1; + Date startTime = DateUtil.getDate("19850101"); + + for (int i = 0; i < noOfEntries; i++) { + assert startTime != null; + Date time = new DateTime(startTime.getTime()).plusDays(i).toDate(); + BackupMetadata backupMetadata = + new BackupMetadata(BackupVersion.SNAPSHOT_BACKUP, "123", time); + backupStatusMgr.start(backupMetadata); + backupStatusMgr.finish(backupMetadata); 
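// Editor's note (not part of this patch): the two tests around this point
// assert that the backup status history is kept newest-first and capped at
// getCapacity() entries. A minimal sketch of such a structure (hypothetical
// BoundedHistory, not Priam's actual status manager):

import java.util.ArrayDeque;
import java.util.Deque;

final class BoundedHistory<T> {
    private final Deque<T> entries = new ArrayDeque<>();
    private final int capacity;

    BoundedHistory(int capacity) {
        this.capacity = capacity;
    }

    // New entries go to the front; the eldest is evicted once over capacity.
    void add(T entry) {
        entries.addFirst(entry);
        if (entries.size() > capacity) {
            entries.removeLast();
        }
    }

    int size() {
        return entries.size();
    }
}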
+ } + + // Verify there is only capacity entries + Assert.assertEquals( + backupStatusMgr.getCapacity(), backupStatusMgr.getAllSnapshotStatus().size()); + } + + @Test + public void getLatestBackup() throws Exception { + prepare(); + Instant start = DateUtil.parseInstant(backupDate); + List list = + backupStatusMgr.getLatestBackupMetadata( + BackupVersion.SNAPSHOT_BACKUP, + new DateRange( + backupDate + + "," + + DateUtil.formatInstant( + DateUtil.yyyyMMddHHmm, + start.plus(12, ChronoUnit.HOURS)))); + + Optional backupMetadata = list.stream().findFirst(); + Assert.assertEquals( + start.plus(4, ChronoUnit.HOURS), backupMetadata.get().getStart().toInstant()); + } + + @Test + public void getLatestBackupFailure() throws Exception { + Optional backupMetadata = + backupStatusMgr + .getLatestBackupMetadata( + BackupVersion.SNAPSHOT_BACKUP, + new DateRange(backupDate + "," + backupDate)) + .stream() + .findFirst(); + + Assert.assertFalse(backupMetadata.isPresent()); + + backupStatusMgr.failed(getBackupMetaData(DateUtil.parseInstant(backupDate), Status.FAILED)); + backupMetadata = + backupStatusMgr + .getLatestBackupMetadata( + BackupVersion.SNAPSHOT_BACKUP, + new DateRange(backupDate + "," + backupDate)) + .stream() + .findFirst(); + Assert.assertFalse(backupMetadata.isPresent()); + } + + @Test + public void getLatestBackupMetadata() throws Exception { + prepare(); + List list = + backupStatusMgr.getLatestBackupMetadata( + BackupVersion.SNAPSHOT_BACKUP, + new DateRange(backupDate + "," + "201812031000")); + list.forEach(System.out::println); + } +} diff --git a/priam/src/test/java/com/netflix/priam/backup/TestBackupVerification.java b/priam/src/test/java/com/netflix/priam/backup/TestBackupVerification.java new file mode 100644 index 000000000..c14c35c13 --- /dev/null +++ b/priam/src/test/java/com/netflix/priam/backup/TestBackupVerification.java @@ -0,0 +1,339 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.priam.backup; + +import com.google.inject.Guice; +import com.google.inject.Injector; +import com.netflix.priam.backup.AbstractBackupPath.BackupFileType; +import com.netflix.priam.backupv2.IMetaProxy; +import com.netflix.priam.backupv2.MetaV1Proxy; +import com.netflix.priam.backupv2.MetaV2Proxy; +import com.netflix.priam.config.IConfiguration; +import com.netflix.priam.scheduler.UnsupportedTypeException; +import com.netflix.priam.utils.DateUtil; +import com.netflix.priam.utils.DateUtil.DateRange; +import java.io.File; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.time.Instant; +import java.time.temporal.ChronoUnit; +import java.util.Date; +import java.util.List; +import java.util.Optional; +import mockit.Mock; +import mockit.MockUp; +import org.apache.commons.io.FileUtils; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +/** Created by aagrawal on 1/23/19. 
*/ +public class TestBackupVerification { + + private final BackupVerification backupVerification; + private final IConfiguration configuration; + private final IBackupStatusMgr backupStatusMgr; + private final String backupDate = "201812011000"; + private final String backupDateEnd = "201812021000"; + private final Path location = + Paths.get( + "some_bucket/casstestbackup/1049_fake-app/1808575600", + BackupFileType.META_V2.toString(), + "1859817645000", + "SNAPPY", + "PLAINTEXT", + "meta_v2_201812011000.json"); + private final int numFakeBackups = 10; + + public TestBackupVerification() { + Injector injector = Guice.createInjector(new BRTestModule()); + + backupVerification = injector.getInstance(BackupVerification.class); + configuration = injector.getInstance(IConfiguration.class); + backupStatusMgr = injector.getInstance(IBackupStatusMgr.class); + } + + static class MockMetaV1Proxy extends MockUp { + @Mock + public BackupVerificationResult isMetaFileValid(AbstractBackupPath metaBackupPath) { + return getBackupVerificationResult(); + } + } + + static class MockMetaV2Proxy extends MockUp { + @Mock + public BackupVerificationResult isMetaFileValid(AbstractBackupPath metaBackupPath) { + return getBackupVerificationResult(); + } + } + + @Before + @After + public void cleanup() { + new MockMetaV1Proxy(); + new MockMetaV2Proxy(); + FileUtils.deleteQuietly(new File(configuration.getBackupStatusFileLoc())); + } + + @Test + public void illegalDateRange() throws UnsupportedTypeException { + try { + backupVerification.verifyBackup(BackupVersion.SNAPSHOT_BACKUP, false, null); + Assert.assertTrue(false); + } catch (IllegalArgumentException e) { + Assert.assertTrue(true); + } + } + + @Test + public void illegalDateRangeBackupDateRange() throws UnsupportedTypeException { + try { + backupVerification.verifyAllBackups(BackupVersion.SNAPSHOT_BACKUP, null); + Assert.assertTrue(false); + } catch (IllegalArgumentException e) { + Assert.assertTrue(true); + } + } + + @Test + public void noBackup() throws Exception { + Optional backupVerificationResultOptinal = + backupVerification.verifyBackup( + BackupVersion.SNAPSHOT_BACKUP, + false, + new DateRange(Instant.now(), Instant.now())); + Assert.assertFalse(backupVerificationResultOptinal.isPresent()); + + backupVerificationResultOptinal = + backupVerification.verifyBackup( + BackupVersion.SNAPSHOT_META_SERVICE, + false, + new DateRange(Instant.now(), Instant.now())); + Assert.assertFalse(backupVerificationResultOptinal.isPresent()); + } + + @Test + public void noBackupDateRange() throws Exception { + List backupVerificationResults = + backupVerification.verifyAllBackups( + BackupVersion.SNAPSHOT_BACKUP, new DateRange(Instant.now(), Instant.now())); + Assert.assertFalse(backupVerificationResults.size() > 0); + + backupVerificationResults = + backupVerification.verifyAllBackups( + BackupVersion.SNAPSHOT_META_SERVICE, + new DateRange(Instant.now(), Instant.now())); + Assert.assertFalse(backupVerificationResults.size() > 0); + } + + private void setUp() throws Exception { + Instant start = DateUtil.parseInstant(backupDate); + for (int i = 0; i < numFakeBackups - 1; i++) { + backupStatusMgr.finish( + getBackupMetaData( + BackupVersion.SNAPSHOT_BACKUP, + start.plus(i + 1, ChronoUnit.MINUTES), + Status.FINISHED)); + } + backupStatusMgr.finish( + getBackupMetaData(BackupVersion.SNAPSHOT_BACKUP, start, Status.FINISHED)); + backupStatusMgr.failed( + getBackupMetaData( + BackupVersion.SNAPSHOT_BACKUP, + start.plus(20, ChronoUnit.MINUTES), + Status.FAILED)); + for (int i = 
0; i < numFakeBackups - 1; i++) { + backupStatusMgr.finish( + getBackupMetaData( + BackupVersion.SNAPSHOT_META_SERVICE, + start.plus(i + 1, ChronoUnit.MINUTES), + Status.FINISHED)); + } + backupStatusMgr.finish( + getBackupMetaData(BackupVersion.SNAPSHOT_META_SERVICE, start, Status.FINISHED)); + } + + @Test + public void verifyBackupVersion1() throws Exception { + setUp(); + // Verify for backup version 1.0 + Optional backupVerificationResultOptinal = + backupVerification.verifyBackup( + BackupVersion.SNAPSHOT_BACKUP, + false, + new DateRange(backupDate + "," + backupDate)); + Assert.assertTrue(backupVerificationResultOptinal.isPresent()); + Assert.assertEquals(Instant.EPOCH, backupVerificationResultOptinal.get().snapshotInstant); + Optional backupMetadata = + backupStatusMgr + .getLatestBackupMetadata( + BackupVersion.SNAPSHOT_BACKUP, + new DateRange(backupDate + "," + backupDate)) + .stream() + .findFirst(); + Assert.assertTrue(backupMetadata.isPresent()); + Assert.assertNotNull(backupMetadata.get().getLastValidated()); + + backupMetadata = + backupStatusMgr + .getLatestBackupMetadata( + BackupVersion.SNAPSHOT_META_SERVICE, + new DateRange(backupDate + "," + backupDate)) + .stream() + .findFirst(); + Assert.assertTrue(backupMetadata.isPresent()); + Assert.assertNull(backupMetadata.get().getLastValidated()); + } + + @Test + public void verifyBackupVersion1DateRange() throws Exception { + setUp(); + // Verify for backup version 1.0 + List backupVerificationResults = + backupVerification.verifyAllBackups( + BackupVersion.SNAPSHOT_BACKUP, + new DateRange(backupDate + "," + backupDateEnd)); + Assert.assertTrue(!backupVerificationResults.isEmpty()); + Assert.assertTrue(backupVerificationResults.size() == numFakeBackups); + backupVerificationResults + .stream() + .forEach(b -> Assert.assertEquals(Instant.EPOCH, b.snapshotInstant)); + List backupMetadata = + backupStatusMgr.getLatestBackupMetadata( + BackupVersion.SNAPSHOT_BACKUP, + new DateRange(backupDate + "," + backupDateEnd)); + Assert.assertTrue(!backupMetadata.isEmpty()); + Assert.assertTrue(backupMetadata.size() == numFakeBackups); + backupMetadata.stream().forEach(b -> Assert.assertNotNull(b.getLastValidated())); + + backupMetadata = + backupStatusMgr.getLatestBackupMetadata( + BackupVersion.SNAPSHOT_META_SERVICE, + new DateRange(backupDate + "," + backupDateEnd)); + Assert.assertTrue(!backupMetadata.isEmpty()); + Assert.assertTrue(backupMetadata.size() == numFakeBackups); + backupMetadata.stream().forEach(b -> Assert.assertNull(b.getLastValidated())); + } + + @Test + public void verifyBackupVersion2() throws Exception { + setUp(); + // Verify for backup version 2.0 + Optional backupVerificationResultOptinal = + backupVerification.verifyBackup( + BackupVersion.SNAPSHOT_META_SERVICE, + false, + new DateRange(backupDate + "," + backupDate)); + Assert.assertTrue(backupVerificationResultOptinal.isPresent()); + Assert.assertEquals(Instant.EPOCH, backupVerificationResultOptinal.get().snapshotInstant); + Assert.assertEquals("some_random", backupVerificationResultOptinal.get().remotePath); + + Optional backupMetadata = + backupStatusMgr + .getLatestBackupMetadata( + BackupVersion.SNAPSHOT_META_SERVICE, + new DateRange(backupDate + "," + backupDate)) + .stream() + .findFirst(); + Assert.assertTrue(backupMetadata.isPresent()); + Assert.assertNotNull(backupMetadata.get().getLastValidated()); + + // Retry the verification, it should not try and re-verify + backupVerificationResultOptinal = + backupVerification.verifyBackup( + 
+
+    @Test
+    public void verifyBackupVersion2() throws Exception {
+        setUp();
+        // Verify for backup version 2.0
+        Optional<BackupVerificationResult> backupVerificationResultOptional =
+                backupVerification.verifyBackup(
+                        BackupVersion.SNAPSHOT_META_SERVICE,
+                        false,
+                        new DateRange(backupDate + "," + backupDate));
+        Assert.assertTrue(backupVerificationResultOptional.isPresent());
+        Assert.assertEquals(Instant.EPOCH, backupVerificationResultOptional.get().snapshotInstant);
+        Assert.assertEquals("some_random", backupVerificationResultOptional.get().remotePath);
+
+        Optional<BackupMetadata> backupMetadata =
+                backupStatusMgr
+                        .getLatestBackupMetadata(
+                                BackupVersion.SNAPSHOT_META_SERVICE,
+                                new DateRange(backupDate + "," + backupDate))
+                        .stream()
+                        .findFirst();
+        Assert.assertTrue(backupMetadata.isPresent());
+        Assert.assertNotNull(backupMetadata.get().getLastValidated());
+
+        // Retry the verification; it should not try to re-verify and instead
+        // rebuilds the result from the stored, already-validated metadata.
+        backupVerificationResultOptional =
+                backupVerification.verifyBackup(
+                        BackupVersion.SNAPSHOT_META_SERVICE,
+                        false,
+                        new DateRange(backupDate + "," + backupDate));
+        Assert.assertTrue(backupVerificationResultOptional.isPresent());
+        Assert.assertEquals(
+                DateUtil.parseInstant(backupDate),
+                backupVerificationResultOptional.get().snapshotInstant);
+        Assert.assertNotEquals("some_random", backupVerificationResultOptional.get().remotePath);
+        Assert.assertEquals(
+                location.subpath(1, location.getNameCount()).toString(),
+                backupVerificationResultOptional.get().remotePath);
+
+        backupMetadata =
+                backupStatusMgr
+                        .getLatestBackupMetadata(
+                                BackupVersion.SNAPSHOT_BACKUP,
+                                new DateRange(backupDate + "," + backupDate))
+                        .stream()
+                        .findFirst();
+        Assert.assertTrue(backupMetadata.isPresent());
+        Assert.assertNull(backupMetadata.get().getLastValidated());
+    }
+
+    @Test
+    public void verifyBackupVersion2DateRange() throws Exception {
+        setUp();
+        // Verify for backup version 2.0
+        List<BackupVerificationResult> backupVerificationResults =
+                backupVerification.verifyAllBackups(
+                        BackupVersion.SNAPSHOT_META_SERVICE,
+                        new DateRange(backupDate + "," + backupDateEnd));
+        Assert.assertFalse(backupVerificationResults.isEmpty());
+        Assert.assertEquals(numFakeBackups, backupVerificationResults.size());
+        backupVerificationResults.forEach(
+                b -> Assert.assertEquals(Instant.EPOCH, b.snapshotInstant));
+        List<BackupMetadata> backupMetadata =
+                backupStatusMgr.getLatestBackupMetadata(
+                        BackupVersion.SNAPSHOT_META_SERVICE,
+                        new DateRange(backupDate + "," + backupDateEnd));
+        Assert.assertFalse(backupMetadata.isEmpty());
+        Assert.assertEquals(numFakeBackups, backupMetadata.size());
+        backupMetadata.forEach(b -> Assert.assertNotNull(b.getLastValidated()));
+
+        backupMetadata =
+                backupStatusMgr.getLatestBackupMetadata(
+                        BackupVersion.SNAPSHOT_BACKUP,
+                        new DateRange(backupDate + "," + backupDateEnd));
+        Assert.assertFalse(backupMetadata.isEmpty());
+        Assert.assertEquals(numFakeBackups, backupMetadata.size());
+        backupMetadata.forEach(b -> Assert.assertNull(b.getLastValidated()));
+    }
+
+    private BackupMetadata getBackupMetaData(
+            BackupVersion backupVersion, Instant startTime, Status status) throws Exception {
+        BackupMetadata backupMetadata =
+                new BackupMetadata(backupVersion, "123", new Date(startTime.toEpochMilli()));
+        backupMetadata.setCompleted(
+                new Date(startTime.plus(30, ChronoUnit.MINUTES).toEpochMilli()));
+        backupMetadata.setStatus(status);
+        backupMetadata.setSnapshotLocation(location.toString());
+        return backupMetadata;
+    }
+
+    private static BackupVerificationResult getBackupVerificationResult() {
+        BackupVerificationResult result = new BackupVerificationResult();
+        result.valid = true;
+        result.manifestAvailable = true;
+        result.remotePath = "some_random";
+        result.filesMatched = 123;
+        result.snapshotInstant = Instant.EPOCH;
+        return result;
+    }
+
+    @Test
+    public void testGetMetaProxy() {
+        IMetaProxy metaProxy = backupVerification.getMetaProxy(BackupVersion.SNAPSHOT_META_SERVICE);
+        Assert.assertNotNull(metaProxy);
+    }
+}
diff --git a/priam/src/test/java/com/netflix/priam/backup/TestCompression.java b/priam/src/test/java/com/netflix/priam/backup/TestCompression.java
index 30149be43..73e616f47 100644
--- a/priam/src/test/java/com/netflix/priam/backup/TestCompression.java
+++ b/priam/src/test/java/com/netflix/priam/backup/TestCompression.java
@@ -17,170 +17,132 @@ package com.netflix.priam.backup;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import com.netflix.priam.compress.ChunkedStream;
+import 
com.netflix.priam.compress.CompressionType; +import com.netflix.priam.compress.ICompression; import com.netflix.priam.compress.SnappyCompression; import com.netflix.priam.utils.SystemUtils; -import org.apache.commons.io.IOUtils; -import org.junit.After; -import org.junit.Before; -import org.junit.Ignore; -import org.junit.Test; -import org.xerial.snappy.SnappyInputStream; -import org.xerial.snappy.SnappyOutputStream; - import java.io.*; import java.util.Enumeration; import java.util.Iterator; import java.util.zip.ZipEntry; import java.util.zip.ZipFile; import java.util.zip.ZipOutputStream; +import org.apache.commons.io.FileUtils; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +public class TestCompression { -@Ignore("does this test really have to generate smoke to verify correct behavior?") -public class TestCompression -{ + private final File randomContentFile = new File("/tmp/content.txt"); @Before - public void setup() throws IOException - { - File f = new File("/tmp/compress-test.txt"); - FileOutputStream stream = new FileOutputStream(f); - for (int i = 0; i < (1000 * 1000); i++) - { - stream.write("This is a test... Random things happen... and you are responsible for it...\n".getBytes("UTF-8")); - stream.write("The quick brown fox jumps over the lazy dog.The quick brown fox jumps over the lazy dog.The quick brown fox jumps over the lazy dog.\n".getBytes("UTF-8")); + public void setup() throws IOException { + try (FileOutputStream stream = new FileOutputStream(randomContentFile)) { + for (int i = 0; i < (5 * 5); i++) { + stream.write( + "This is a test... Random things happen... and you are responsible for it...\n" + .getBytes("UTF-8")); + stream.write( + "The quick brown fox jumps over the lazy dog.The quick brown fox jumps over the lazy dog.The quick brown fox jumps over the lazy dog.\n" + .getBytes("UTF-8")); + } } - IOUtils.closeQuietly(stream); } @After - public void done() - { - File f = new File("/tmp/compress-test.txt"); - if (f.exists()) - f.delete(); - } - - void validateCompression(String uncompress, String compress) - { - File uncompressed = new File(uncompress); - File compressed = new File(compress); - assertTrue(uncompressed.length() > compressed.length()); + public void done() { + FileUtils.deleteQuietly(randomContentFile); } @Test - public void zip() throws IOException - { - BufferedInputStream source = null; - FileOutputStream dest = new FileOutputStream("/tmp/compressed.zip"); - ZipOutputStream out = new ZipOutputStream(new BufferedOutputStream(dest)); - byte data[] = new byte[2048]; - File file = new File("/tmp/compress-test.txt"); - FileInputStream fi = new FileInputStream(file); - source = new BufferedInputStream(fi, 2048); - ZipEntry entry = new ZipEntry(file.getName()); - out.putNextEntry(entry); - int count; - while ((count = source.read(data, 0, 2048)) != -1) - { - out.write(data, 0, count); - } - IOUtils.closeQuietly(out); - validateCompression("/tmp/compress-test.txt", "/tmp/compressed.zip"); - } - - @Test - public void unzip() throws IOException - { - BufferedOutputStream dest1 = null; - BufferedInputStream is = null; - ZipFile zipfile = new ZipFile("/tmp/compressed.zip"); - Enumeration e = zipfile.entries(); - while (e.hasMoreElements()) - { - ZipEntry entry = (ZipEntry) e.nextElement(); - is = new BufferedInputStream(zipfile.getInputStream(entry)); - int c; - byte d[] = new byte[2048]; - FileOutputStream fos = new 
FileOutputStream("/tmp/compress-test-out-0.txt"); - dest1 = new BufferedOutputStream(fos, 2048); - while ((c = is.read(d, 0, 2048)) != -1) - { - dest1.write(d, 0, c); + public void zipTest() throws IOException { + String zipFileName = "/tmp/compressed.zip"; + File decompressedTempOutput = new File("/tmp/compress-test-out.txt"); + + try { + try (ZipOutputStream out = + new ZipOutputStream( + new BufferedOutputStream(new FileOutputStream(zipFileName))); + BufferedInputStream source = + new BufferedInputStream( + new FileInputStream(randomContentFile), 2048); ) { + byte data[] = new byte[2048]; + ZipEntry entry = new ZipEntry(randomContentFile.getName()); + out.putNextEntry(entry); + int count; + while ((count = source.read(data, 0, 2048)) != -1) { + out.write(data, 0, count); + } + } + assertTrue(randomContentFile.length() > new File(zipFileName).length()); + + ZipFile zipfile = new ZipFile(zipFileName); + Enumeration e = zipfile.entries(); + while (e.hasMoreElements()) { + ZipEntry entry = (ZipEntry) e.nextElement(); + try (BufferedInputStream is = + new BufferedInputStream(zipfile.getInputStream(entry)); + BufferedOutputStream dest1 = + new BufferedOutputStream( + new FileOutputStream(decompressedTempOutput), 2048)) { + int c; + byte d[] = new byte[2048]; + + while ((c = is.read(d, 0, 2048)) != -1) { + dest1.write(d, 0, c); + } + } } - IOUtils.closeQuietly(dest1); - IOUtils.closeQuietly(is); + String md1 = SystemUtils.md5(randomContentFile); + String md2 = SystemUtils.md5(decompressedTempOutput); + assertEquals(md1, md2); + } finally { + FileUtils.deleteQuietly(new File(zipFileName)); + FileUtils.deleteQuietly(decompressedTempOutput); } - String md1 = SystemUtils.md5(new File("/tmp/compress-test.txt")); - String md2 = SystemUtils.md5(new File("/tmp/compress-test-out-0.txt")); - assertEquals(md1, md2); } @Test - public void snappyCompress() throws IOException - { - FileInputStream fi = new FileInputStream("/tmp/compress-test.txt"); - SnappyOutputStream out = new SnappyOutputStream(new BufferedOutputStream(new FileOutputStream("/tmp/test0.snp"))); - BufferedInputStream origin = new BufferedInputStream(fi, 1024); - byte data[] = new byte[1024]; - int count; - while ((count = origin.read(data, 0, 1024)) != -1) - { - out.write(data, 0, count); - } - IOUtils.closeQuietly(origin); - IOUtils.closeQuietly(fi); - IOUtils.closeQuietly(out); - - validateCompression("/tmp/compress-test.txt", "/tmp/test0.snp"); + public void snappyTest() throws IOException { + ICompression compress = new SnappyCompression(); + testCompressor(compress); } - @Test - public void snappyDecompress() throws IOException - { - // decompress normally. 
- SnappyInputStream is = new SnappyInputStream(new BufferedInputStream(new FileInputStream("/tmp/test0.snp"))); - byte d[] = new byte[1024]; - FileOutputStream fos = new FileOutputStream("/tmp/compress-test-out-1.txt"); - BufferedOutputStream dest1 = new BufferedOutputStream(fos, 1024); - int c; - while ((c = is.read(d, 0, 1024)) != -1) - { - dest1.write(d, 0, c); - } - IOUtils.closeQuietly(dest1); - IOUtils.closeQuietly(is); - - String md1 = SystemUtils.md5(new File("/tmp/compress-test-out-1.txt")); - String md2 = SystemUtils.md5(new File("/tmp/compress-test.txt")); - assertEquals(md1, md2); - } + private void testCompressor(ICompression compress) throws IOException { + File compressedOutputFile = new File("/tmp/test1.compress"); + File decompressedTempOutput = new File("/tmp/compress-test-out.txt"); + long chunkSize = 5L * 1024 * 1024; + try { + + Iterator it = + new ChunkedStream( + new FileInputStream(randomContentFile), + chunkSize, + CompressionType.SNAPPY); + try (FileOutputStream ostream = new FileOutputStream(compressedOutputFile)) { + while (it.hasNext()) { + byte[] chunk = it.next(); + ostream.write(chunk); + } + ostream.flush(); + } - @Test - public void compress() throws IOException - { - SnappyCompression compress = new SnappyCompression(); - File file = new File(new File("/tmp/compress-test.txt"), "r"); - long chunkSize = 5L*1024*1024; - Iterator it = compress.compress(new AbstractBackupPath.RafInputStream(new RandomAccessFile(file, "r")), chunkSize); - FileOutputStream ostream = new FileOutputStream("/tmp/test1.snp"); - while (it.hasNext()) - { - byte[] chunk = it.next(); - ostream.write(chunk); + assertTrue(randomContentFile.length() > compressedOutputFile.length()); + + compress.decompressAndClose( + new FileInputStream(compressedOutputFile), + new FileOutputStream(decompressedTempOutput)); + String md1 = SystemUtils.md5(randomContentFile); + String md2 = SystemUtils.md5(decompressedTempOutput); + assertEquals(md1, md2); + } finally { + FileUtils.deleteQuietly(compressedOutputFile); + FileUtils.deleteQuietly(decompressedTempOutput); } - IOUtils.closeQuietly(ostream); - validateCompression("/tmp/compress-test.txt", "/tmp/test1.snp"); - } - - @Test - public void decompress() throws IOException - { - SnappyCompression compress = new SnappyCompression(); - compress.decompressAndClose(new FileInputStream("/tmp/test1.snp"), new FileOutputStream("/tmp/compress-test-out-2.txt")); - String md1 = SystemUtils.md5(new File("/tmp/compress-test.txt")); - String md2 = SystemUtils.md5(new File("/tmp/compress-test-out-2.txt")); - assertEquals(md1, md2); } } diff --git a/priam/src/test/java/com/netflix/priam/backup/TestCustomizedTPE.java b/priam/src/test/java/com/netflix/priam/backup/TestCustomizedTPE.java index b84cc1beb..0f02ddf39 100644 --- a/priam/src/test/java/com/netflix/priam/backup/TestCustomizedTPE.java +++ b/priam/src/test/java/com/netflix/priam/backup/TestCustomizedTPE.java @@ -17,71 +17,56 @@ package com.netflix.priam.backup; -import org.junit.Assert; +import com.netflix.priam.scheduler.BlockingSubmitThreadPoolExecutor; import java.util.concurrent.Callable; import java.util.concurrent.LinkedBlockingDeque; import java.util.concurrent.atomic.AtomicInteger; - +import org.junit.Assert; import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.netflix.priam.scheduler.BlockingSubmitThreadPoolExecutor; - -public class TestCustomizedTPE -{ +public class TestCustomizedTPE { private static final Logger logger = 
LoggerFactory.getLogger(TestCustomizedTPE.class); private static final int MAX_THREADS = 10; // timeout 1 sec private static final int TIME_OUT = 10 * 1000; - private BlockingSubmitThreadPoolExecutor startTest = new BlockingSubmitThreadPoolExecutor(MAX_THREADS, new LinkedBlockingDeque(MAX_THREADS), TIME_OUT); + private final BlockingSubmitThreadPoolExecutor startTest = + new BlockingSubmitThreadPoolExecutor( + MAX_THREADS, new LinkedBlockingDeque<>(MAX_THREADS), TIME_OUT); @Test - public void testExecutor() throws InterruptedException - { + public void testExecutor() throws InterruptedException { final AtomicInteger count = new AtomicInteger(); - for (int i = 0; i < 100; i++) - { - startTest.submit(new Callable() - { - @Override - public Void call() throws Exception - { - Thread.sleep(100); - logger.info("Count:{}", count.incrementAndGet()); - return null; - } - }); + for (int i = 0; i < 100; i++) { + startTest.submit( + (Callable) + () -> { + Thread.sleep(100); + logger.info("Count:{}", count.incrementAndGet()); + return null; + }); } startTest.sleepTillEmpty(); Assert.assertEquals(100, count.get()); } @Test - public void testException() - { + public void testException() { boolean success = false; - try - { - for (int i = 0; i < 100; i++) - { - startTest.submit(new Callable() - { - @Override - public Void call() throws Exception - { - logger.info("Sleeping for 2 * timeout."); - Thread.sleep(TIME_OUT * 2); - return null; - } - }); + try { + for (int i = 0; i < 100; i++) { + startTest.submit( + (Callable) + () -> { + logger.info("Sleeping for 2 * timeout."); + Thread.sleep(TIME_OUT * 2); + return null; + }); } - } - catch (RuntimeException ex) - { + } catch (RuntimeException ex) { success = true; } Assert.assertTrue("Failure to timeout...", success); } - } diff --git a/priam/src/test/java/com/netflix/priam/backup/TestFileIterator.java b/priam/src/test/java/com/netflix/priam/backup/TestFileIterator.java index 7d4212dc7..e79b45f51 100644 --- a/priam/src/test/java/com/netflix/priam/backup/TestFileIterator.java +++ b/priam/src/test/java/com/netflix/priam/backup/TestFileIterator.java @@ -24,10 +24,11 @@ import com.amazonaws.services.s3.model.S3ObjectSummary; import com.google.inject.Guice; import com.google.inject.Injector; -import com.netflix.priam.config.FakeConfiguration; -import com.netflix.priam.config.IConfiguration; -import com.netflix.priam.aws.S3FileIterator; +import com.netflix.priam.aws.S3FileSystem; import com.netflix.priam.identity.InstanceIdentity; +import com.netflix.priam.utils.DateUtil; +import java.io.IOException; +import java.util.*; import mockit.Mock; import mockit.MockUp; import org.junit.Assert; @@ -35,60 +36,47 @@ import org.junit.Ignore; import org.junit.Test; -import java.io.IOException; -import java.util.*; - /** * Unit test for backup file iterator - * - * @author Praveen Sadhu * + * @author Praveen Sadhu */ -public class TestFileIterator -{ - private static Injector injector; +public class TestFileIterator { private static Date startTime, endTime; - private static Calendar cal; - private static AmazonS3Client s3client; - - private static IConfiguration conf; - private static InstanceIdentity factory; + private static S3FileSystem s3FileSystem; + private static String region; + private static String bucket = "TESTBUCKET"; + @BeforeClass - public static void setup() throws InterruptedException, IOException - { - s3client = new MockAmazonS3Client().getMockInstance(); - new MockObjectListing(); - - injector = Guice.createInjector(new BRTestModule()); - conf = 
injector.getInstance(IConfiguration.class); - factory = injector.getInstance(InstanceIdentity.class); - - cal = Calendar.getInstance(); - cal.set(2011, 7, 11, 0, 30, 0); - cal.set(Calendar.MILLISECOND, 0); - startTime = cal.getTime(); - cal.add(Calendar.HOUR, 5); - endTime = cal.getTime(); + public static void setup() throws InterruptedException, IOException { + AmazonS3Client s3client = new MockAmazonS3Client().getMockInstance(); + new MockObjectListing(); + + Injector injector = Guice.createInjector(new BRTestModule()); + InstanceIdentity factory = injector.getInstance(InstanceIdentity.class); + region = factory.getInstanceInfo().getRegion(); + s3FileSystem = injector.getInstance(S3FileSystem.class); + s3FileSystem.setS3Client(s3client); + + DateUtil.DateRange dateRange = new DateUtil.DateRange("201108110030,201108110530"); + startTime = new Date(dateRange.getStartTime().toEpochMilli()); + endTime = new Date(dateRange.getEndTime().toEpochMilli()); } - // MockAmazonS3Client class - @Ignore - public static class MockAmazonS3Client extends MockUp - { - public static String bucketName = ""; - public static String prefix = ""; - + static class MockAmazonS3Client extends MockUp { @Mock - public ObjectListing listObjects(ListObjectsRequest listObjectsRequest) throws AmazonClientException { + public ObjectListing listObjects(ListObjectsRequest listObjectsRequest) + throws AmazonClientException { ObjectListing listing = new ObjectListing(); listing.setBucketName(listObjectsRequest.getBucketName()); listing.setPrefix(listObjectsRequest.getPrefix()); return listing; } - @Mock - public ObjectListing listNextBatchOfObjects(ObjectListing previousObjectListing) throws AmazonClientException { + @Mock + public ObjectListing listNextBatchOfObjects(ObjectListing previousObjectListing) + throws AmazonClientException { ObjectListing listing = new ObjectListing(); listing.setBucketName(previousObjectListing.getBucketName()); listing.setPrefix(previousObjectListing.getPrefix()); @@ -98,198 +86,268 @@ public ObjectListing listNextBatchOfObjects(ObjectListing previousObjectListing) // MockObjectListing class @Ignore - public static class MockObjectListing extends MockUp - { + public static class MockObjectListing extends MockUp { public static boolean truncated = true; public static boolean firstcall = true; - public static boolean simfilter = false;//Simulate filtering + public static boolean simfilter = false; // Simulate filtering @Mock - public List getObjectSummaries() - { - if (firstcall) - { + public List getObjectSummaries() { + if (firstcall) { firstcall = false; - if( simfilter ) - return getObjectSummaryEmpty(); + if (simfilter) return getObjectSummaryEmpty(); return getObjectSummary(); - } - else - { - if( simfilter ){ - simfilter = false;//reset + } else { + if (simfilter) { + simfilter = false; // reset return getObjectSummaryEmpty(); - } - else - truncated = false; + } else truncated = false; return getNextObjectSummary(); } } @Mock - public boolean isTruncated() - { + public boolean isTruncated() { return truncated; } } @Test - public void testIteratorEmptySet() - { - cal.set(2011, 7, 11, 6, 1, 0); - cal.set(Calendar.MILLISECOND, 0); - Date stime = cal.getTime(); - cal.add(Calendar.HOUR, 5); - Date etime = cal.getTime(); - MockAmazonS3Client.bucketName = "TESTBUCKET"; - MockAmazonS3Client.prefix = conf.getBackupLocation() + "/" + conf.getDC() + "/" + conf.getAppName() + "/" + factory.getInstance().getToken(); - MockAmazonS3Client.prefix += "/20110811"; - - S3FileIterator fileIterator = new 
S3FileIterator(injector.getProvider(AbstractBackupPath.class), s3client, "TESTBUCKET", stime, etime); - Set files = new HashSet(); - while (fileIterator.hasNext()) - files.add(fileIterator.next().getRemotePath()); + public void testIteratorEmptySet() { + DateUtil.DateRange dateRange = new DateUtil.DateRange("201107110601,201107111101"); + Date stime = new Date(dateRange.getStartTime().toEpochMilli()); + Date etime = new Date(dateRange.getEndTime().toEpochMilli()); + + Iterator fileIterator = s3FileSystem.list(bucket, stime, etime); + Set files = new HashSet<>(); + while (fileIterator.hasNext()) files.add(fileIterator.next().getRemotePath()); Assert.assertEquals(0, files.size()); } @Test - public void testIterator() - { + public void testIterator() { MockObjectListing.truncated = false; MockObjectListing.firstcall = true; MockObjectListing.simfilter = false; - MockAmazonS3Client.bucketName = "TESTBUCKET"; - MockAmazonS3Client.prefix = conf.getBackupLocation() + "/" + conf.getDC() + "/" + conf.getAppName() + "/" + factory.getInstance().getToken(); - MockAmazonS3Client.prefix += "/20110811"; - - S3FileIterator fileIterator = new S3FileIterator(injector.getProvider(AbstractBackupPath.class), s3client, "TESTBUCKET", startTime, endTime); - Set files = new HashSet(); - while (fileIterator.hasNext()) - files.add(fileIterator.next().getRemotePath()); + + Iterator fileIterator = s3FileSystem.list(bucket, startTime, endTime); + + Set files = new HashSet<>(); + while (fileIterator.hasNext()) files.add(fileIterator.next().getRemotePath()); Assert.assertEquals(3, files.size()); - Assert.assertTrue(files.contains("test_backup/"+FakeConfiguration.FAKE_REGION+"/fakecluster/123456/201108110030/SNAP/ks1/cf1/f1.db")); - Assert.assertTrue(files.contains("test_backup/"+FakeConfiguration.FAKE_REGION+"/fakecluster/123456/201108110430/SST/ks1/cf1/f2.db")); - Assert.assertTrue(files.contains("test_backup/"+FakeConfiguration.FAKE_REGION+"/fakecluster/123456/201108110030/META/meta.json")); - Assert.assertFalse(files.contains("test_backup/"+FakeConfiguration.FAKE_REGION+"/fakecluster/123456/201108110600/SST/ks1/cf1/f3.db")); + Assert.assertTrue( + files.contains( + "test_backup/" + + region + + "/fakecluster/123456/201108110030/SNAP/ks1/cf1/f1.db")); + Assert.assertTrue( + files.contains( + "test_backup/" + + region + + "/fakecluster/123456/201108110430/SST/ks1/cf1/f2.db")); + Assert.assertTrue( + files.contains( + "test_backup/" + + region + + "/fakecluster/123456/201108110030/META/meta.json")); + Assert.assertFalse( + files.contains( + "test_backup/" + + region + + "/fakecluster/123456/201108110600/SST/ks1/cf1/f3.db")); } @Test - public void testIteratorTruncated() - { + public void testIteratorTruncated() { MockObjectListing.truncated = true; MockObjectListing.firstcall = true; MockObjectListing.simfilter = false; - MockAmazonS3Client.bucketName = "TESTBUCKET"; - MockAmazonS3Client.prefix = conf.getBackupLocation() + "/" + conf.getDC() + "/" + conf.getAppName() + "/" + factory.getInstance().getToken(); - MockAmazonS3Client.prefix += "/20110811"; - - S3FileIterator fileIterator = new S3FileIterator(injector.getProvider(AbstractBackupPath.class), s3client, "TESTBUCKET", startTime, endTime); - Set files = new HashSet(); - while (fileIterator.hasNext()) - files.add(fileIterator.next().getRemotePath()); - Assert.assertEquals(5, files.size()); - Assert.assertTrue(files.contains("test_backup/"+FakeConfiguration.FAKE_REGION+"/fakecluster/123456/201108110030/SNAP/ks1/cf1/f1.db")); - 
Assert.assertTrue(files.contains("test_backup/"+FakeConfiguration.FAKE_REGION+"/fakecluster/123456/201108110430/SST/ks1/cf1/f2.db")); - Assert.assertTrue(files.contains("test_backup/"+FakeConfiguration.FAKE_REGION+"/fakecluster/123456/201108110030/META/meta.json")); - Assert.assertFalse(files.contains("test_backup/"+FakeConfiguration.FAKE_REGION+"/fakecluster/123456/201108110600/SST/ks1/cf1/f3.db")); - Assert.assertTrue(files.contains("test_backup/"+FakeConfiguration.FAKE_REGION+"/fakecluster/123456/201108110030/SNAP/ks2/cf1/f1.db")); - Assert.assertTrue(files.contains("test_backup/"+FakeConfiguration.FAKE_REGION+"/fakecluster/123456/201108110430/SST/ks2/cf1/f2.db")); - Assert.assertFalse(files.contains("test_backup/"+FakeConfiguration.FAKE_REGION+"/fakecluster/123456/201108110600/SST/ks2/cf1/f3.db")); + Iterator fileIterator = s3FileSystem.list(bucket, startTime, endTime); + + Set files = new HashSet<>(); + while (fileIterator.hasNext()) files.add(fileIterator.next().getRemotePath()); + Assert.assertEquals(5, files.size()); + Assert.assertTrue( + files.contains( + "test_backup/" + + region + + "/fakecluster/123456/201108110030/SNAP/ks1/cf1/f1.db")); + Assert.assertTrue( + files.contains( + "test_backup/" + + region + + "/fakecluster/123456/201108110430/SST/ks1/cf1/f2.db")); + Assert.assertTrue( + files.contains( + "test_backup/" + + region + + "/fakecluster/123456/201108110030/META/meta.json")); + Assert.assertFalse( + files.contains( + "test_backup/" + + region + + "/fakecluster/123456/201108110600/SST/ks1/cf1/f3.db")); + Assert.assertTrue( + files.contains( + "test_backup/" + + region + + "/fakecluster/123456/201108110030/SNAP/ks2/cf1/f1.db")); + Assert.assertTrue( + files.contains( + "test_backup/" + + region + + "/fakecluster/123456/201108110430/SST/ks2/cf1/f2.db")); + Assert.assertFalse( + files.contains( + "test_backup/" + + region + + "/fakecluster/123456/201108110600/SST/ks2/cf1/f3.db")); } @Test - public void testIteratorTruncatedOOR() - { + public void testIteratorTruncatedOOR() { MockObjectListing.truncated = true; MockObjectListing.firstcall = true; MockObjectListing.simfilter = true; - MockAmazonS3Client.bucketName = "TESTBUCKET"; - MockAmazonS3Client.prefix = conf.getBackupLocation() + "/" + conf.getDC() + "/" + conf.getAppName() + "/" + factory.getInstance().getToken(); - MockAmazonS3Client.prefix += "/20110811"; - - S3FileIterator fileIterator = new S3FileIterator(injector.getProvider(AbstractBackupPath.class), s3client, "TESTBUCKET", startTime, endTime); - Set files = new HashSet(); - while (fileIterator.hasNext()) - files.add(fileIterator.next().getRemotePath()); + + Iterator fileIterator = s3FileSystem.list(bucket, startTime, endTime); + + Set files = new HashSet<>(); + while (fileIterator.hasNext()) files.add(fileIterator.next().getRemotePath()); Assert.assertEquals(2, files.size()); - Assert.assertFalse(files.contains("test_backup/"+FakeConfiguration.FAKE_REGION+"/fakecluster/123456/201107110030/SNAP/ks1/cf1/f1.db")); - Assert.assertFalse(files.contains("test_backup/"+FakeConfiguration.FAKE_REGION+"/fakecluster/123456/201107110430/SST/ks1/cf1/f2.db")); - Assert.assertFalse(files.contains("test_backup/"+FakeConfiguration.FAKE_REGION+"/fakecluster/123456/201107110030/META/meta.json")); - Assert.assertFalse(files.contains("test_backup/"+FakeConfiguration.FAKE_REGION+"/fakecluster/123456/201107110600/SST/ks1/cf1/f3.db")); - - Assert.assertTrue(files.contains("test_backup/"+FakeConfiguration.FAKE_REGION+"/fakecluster/123456/201108110030/SNAP/ks2/cf1/f1.db")); - 
Assert.assertTrue(files.contains("test_backup/"+FakeConfiguration.FAKE_REGION+"/fakecluster/123456/201108110430/SST/ks2/cf1/f2.db")); - Assert.assertFalse(files.contains("test_backup/"+FakeConfiguration.FAKE_REGION+"/fakecluster/123456/201108110600/SST/ks2/cf1/f3.db")); - + Assert.assertFalse( + files.contains( + "test_backup/" + + region + + "/fakecluster/123456/201107110030/SNAP/ks1/cf1/f1.db")); + Assert.assertFalse( + files.contains( + "test_backup/" + + region + + "/fakecluster/123456/201107110430/SST/ks1/cf1/f2.db")); + Assert.assertFalse( + files.contains( + "test_backup/" + + region + + "/fakecluster/123456/201107110030/META/meta.json")); + Assert.assertFalse( + files.contains( + "test_backup/" + + region + + "/fakecluster/123456/201107110600/SST/ks1/cf1/f3.db")); + + Assert.assertTrue( + files.contains( + "test_backup/" + + region + + "/fakecluster/123456/201108110030/SNAP/ks2/cf1/f1.db")); + Assert.assertTrue( + files.contains( + "test_backup/" + + region + + "/fakecluster/123456/201108110430/SST/ks2/cf1/f2.db")); + Assert.assertFalse( + files.contains( + "test_backup/" + + region + + "/fakecluster/123456/201108110600/SST/ks2/cf1/f3.db")); } @Test - public void testRestorePathIteration() - { + public void testRestorePathIteration() { MockObjectListing.truncated = true; MockObjectListing.firstcall = true; MockObjectListing.simfilter = false; - MockAmazonS3Client.bucketName = "RESTOREBUCKET"; - MockAmazonS3Client.prefix = "test_restore_backup/fake-restore-region/fakerestorecluster" + "/" + factory.getInstance().getToken(); - MockAmazonS3Client.prefix += "/20110811"; - S3FileIterator fileIterator = new S3FileIterator(injector.getProvider(AbstractBackupPath.class), s3client, "RESTOREBUCKET/test_restore_backup/fake-restore-region/fakerestorecluster", startTime, endTime); - Set files = new HashSet(); - while (fileIterator.hasNext()) - files.add(fileIterator.next().getRemotePath()); - while (fileIterator.hasNext()) - files.add(fileIterator.next().getRemotePath()); + Iterator fileIterator = + s3FileSystem.list( + "RESTOREBUCKET/test_restore_backup/fake-restore-region/fakerestorecluster", + startTime, + endTime); - Assert.assertEquals(5, files.size()); - Assert.assertTrue(files.contains("test_backup/"+FakeConfiguration.FAKE_REGION+"/fakecluster/123456/201108110030/SNAP/ks1/cf1/f1.db")); - Assert.assertTrue(files.contains("test_backup/"+FakeConfiguration.FAKE_REGION+"/fakecluster/123456/201108110430/SST/ks1/cf1/f2.db")); - Assert.assertTrue(files.contains("test_backup/"+FakeConfiguration.FAKE_REGION+"/fakecluster/123456/201108110030/META/meta.json")); - Assert.assertFalse(files.contains("test_backup/"+FakeConfiguration.FAKE_REGION+"/fakecluster/123456/201108110600/SST/ks1/cf1/f3.db")); + Set files = new HashSet<>(); + while (fileIterator.hasNext()) files.add(fileIterator.next().getRemotePath()); + while (fileIterator.hasNext()) files.add(fileIterator.next().getRemotePath()); - Assert.assertTrue(files.contains("test_backup/"+FakeConfiguration.FAKE_REGION+"/fakecluster/123456/201108110030/SNAP/ks2/cf1/f1.db")); - Assert.assertTrue(files.contains("test_backup/"+FakeConfiguration.FAKE_REGION+"/fakecluster/123456/201108110430/SST/ks2/cf1/f2.db")); - Assert.assertFalse(files.contains("test_backup/"+FakeConfiguration.FAKE_REGION+"/fakecluster/123456/201108110600/SST/ks2/cf1/f3.db")); + Assert.assertEquals(5, files.size()); + Assert.assertTrue( + files.contains( + "test_backup/" + + region + + "/fakecluster/123456/201108110030/SNAP/ks1/cf1/f1.db")); + Assert.assertTrue( + files.contains( + 
"test_backup/" + + region + + "/fakecluster/123456/201108110430/SST/ks1/cf1/f2.db")); + Assert.assertTrue( + files.contains( + "test_backup/" + + region + + "/fakecluster/123456/201108110030/META/meta.json")); + Assert.assertFalse( + files.contains( + "test_backup/" + + region + + "/fakecluster/123456/201108110600/SST/ks1/cf1/f3.db")); + Assert.assertTrue( + files.contains( + "test_backup/" + + region + + "/fakecluster/123456/201108110030/SNAP/ks2/cf1/f1.db")); + Assert.assertTrue( + files.contains( + "test_backup/" + + region + + "/fakecluster/123456/201108110430/SST/ks2/cf1/f2.db")); + Assert.assertFalse( + files.contains( + "test_backup/" + + region + + "/fakecluster/123456/201108110600/SST/ks2/cf1/f3.db")); } - public static List getObjectSummary() - { - List list = new ArrayList(); + private static List getObjectSummary() { + List list = new ArrayList<>(); S3ObjectSummary summary = new S3ObjectSummary(); - summary.setKey("test_backup/"+FakeConfiguration.FAKE_REGION+"/fakecluster/123456/201108110030/SNAP/ks1/cf1/f1.db"); + summary.setKey( + "test_backup/" + region + "/fakecluster/123456/201108110030/SNAP/ks1/cf1/f1.db"); list.add(summary); summary = new S3ObjectSummary(); - summary.setKey("test_backup/"+FakeConfiguration.FAKE_REGION+"/fakecluster/123456/201108110430/SST/ks1/cf1/f2.db"); + summary.setKey( + "test_backup/" + region + "/fakecluster/123456/201108110430/SST/ks1/cf1/f2.db"); list.add(summary); summary = new S3ObjectSummary(); - summary.setKey("test_backup/"+FakeConfiguration.FAKE_REGION+"/fakecluster/123456/201108110600/SST/ks1/cf1/f3.db"); + summary.setKey( + "test_backup/" + region + "/fakecluster/123456/201108110600/SST/ks1/cf1/f3.db"); list.add(summary); summary = new S3ObjectSummary(); - summary.setKey("test_backup/"+FakeConfiguration.FAKE_REGION+"/fakecluster/123456/201108110030/META/meta.json"); + summary.setKey("test_backup/" + region + "/fakecluster/123456/201108110030/META/meta.json"); list.add(summary); return list; } - public static List getObjectSummaryEmpty() - { - return new ArrayList(); + private static List getObjectSummaryEmpty() { + return new ArrayList<>(); } - public static List getNextObjectSummary() - { - List list = new ArrayList(); + private static List getNextObjectSummary() { + List list = new ArrayList<>(); S3ObjectSummary summary = new S3ObjectSummary(); - summary.setKey("test_backup/"+FakeConfiguration.FAKE_REGION+"/fakecluster/123456/201108110030/SNAP/ks2/cf1/f1.db"); + summary.setKey( + "test_backup/" + region + "/fakecluster/123456/201108110030/SNAP/ks2/cf1/f1.db"); list.add(summary); summary = new S3ObjectSummary(); - summary.setKey("test_backup/"+FakeConfiguration.FAKE_REGION+"/fakecluster/123456/201108110430/SST/ks2/cf1/f2.db"); + summary.setKey( + "test_backup/" + region + "/fakecluster/123456/201108110430/SST/ks2/cf1/f2.db"); list.add(summary); summary = new S3ObjectSummary(); - summary.setKey("test_backup/"+FakeConfiguration.FAKE_REGION+"/fakecluster/123456/201108110600/SST/ks2/cf1/f3.db"); + summary.setKey( + "test_backup/" + region + "/fakecluster/123456/201108110600/SST/ks2/cf1/f3.db"); list.add(summary); return list; } - -} \ No newline at end of file +} diff --git a/priam/src/test/java/com/netflix/priam/backup/TestRestore.java b/priam/src/test/java/com/netflix/priam/backup/TestRestore.java deleted file mode 100644 index 5fa11351f..000000000 --- a/priam/src/test/java/com/netflix/priam/backup/TestRestore.java +++ /dev/null @@ -1,165 +0,0 @@ -/* - * Copyright 2017 Netflix, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.priam.backup; - -import com.google.inject.Guice; -import com.google.inject.Injector; -import com.google.inject.Key; -import com.google.inject.name.Names; -import com.netflix.priam.config.FakeConfiguration; -import com.netflix.priam.config.IConfiguration; -import com.netflix.priam.restore.Restore; -import org.apache.commons.io.FileUtils; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; - -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Calendar; -import java.util.Date; - -public class TestRestore -{ - private static Injector injector; - private static FakeBackupFileSystem filesystem; - private static ArrayList fileList; - private static Calendar cal; - private static IConfiguration conf; - - @BeforeClass - public static void setup() throws InterruptedException, IOException - { - injector = Guice.createInjector(new BRTestModule()); - filesystem = (FakeBackupFileSystem) injector.getInstance(Key.get(IBackupFileSystem.class,Names.named("incr_restore"))); - conf = injector.getInstance(IConfiguration.class); - fileList = new ArrayList(); - File cassdir = new File(conf.getDataFileLocation()); - cassdir.mkdirs(); - cal = Calendar.getInstance(); - } - - @AfterClass - public static void cleanup() throws IOException - { - File file = new File("cass"); - FileUtils.deleteQuietly(file); - } - - private static void populateBackupFileSystem(String baseDir){ - fileList.clear(); - fileList.add(baseDir + "/"+FakeConfiguration.FAKE_REGION+"/fakecluster/123456/201108110030/META/meta.json"); - fileList.add(baseDir + "/"+FakeConfiguration.FAKE_REGION+"/fakecluster/123456/201108110030/SNAP/ks1/cf1/f1.db"); - fileList.add(baseDir + "/"+FakeConfiguration.FAKE_REGION+"/fakecluster/123456/201108110030/SNAP/ks1/cf1/f2.db"); - fileList.add(baseDir + "/"+FakeConfiguration.FAKE_REGION+"/fakecluster/123456/201108110030/SNAP/ks2/cf1/f2.db"); - fileList.add(baseDir + "/"+FakeConfiguration.FAKE_REGION+"/fakecluster/123456/201108110530/SST/ks2/cf1/f3.db"); - fileList.add(baseDir + "/"+FakeConfiguration.FAKE_REGION+"/fakecluster/123456/201108110600/SST/ks2/cf1/f4.db"); - filesystem.baseDir = baseDir; - filesystem.region = FakeConfiguration.FAKE_REGION; - filesystem.clusterName = "fakecluster"; - filesystem.setupTest(fileList); - } - - @Test - public void testRestore() throws Exception - { - populateBackupFileSystem("test_backup"); - File tmpdir = new File(conf.getDataFileLocation() + "/test"); - tmpdir.mkdir(); - Assert.assertTrue(tmpdir.exists()); - Restore restore = injector.getInstance(Restore.class); - cal.set(2011, Calendar.AUGUST, 11, 0, 30, 0); - cal.set(Calendar.MILLISECOND, 0); - Date startTime = cal.getTime(); - cal.add(Calendar.HOUR, 5); - restore.restore(startTime, cal.getTime()); - Assert.assertTrue(filesystem.downloadedFiles.contains(fileList.get(0))); - 
Assert.assertTrue(filesystem.downloadedFiles.contains(fileList.get(1))); - Assert.assertTrue(filesystem.downloadedFiles.contains(fileList.get(2))); - Assert.assertTrue(filesystem.downloadedFiles.contains(fileList.get(3))); - Assert.assertFalse(filesystem.downloadedFiles.contains(fileList.get(4))); - Assert.assertFalse(filesystem.downloadedFiles.contains(fileList.get(5))); - tmpdir = new File(conf.getDataFileLocation() + "/test"); - Assert.assertFalse(tmpdir.exists()); - } - - //Pick latest file - @Test - public void testRestoreLatest() throws Exception - { - populateBackupFileSystem("test_backup"); - String metafile = "test_backup/"+FakeConfiguration.FAKE_REGION+"/fakecluster/123456/201108110130/META/meta.json"; - filesystem.addFile(metafile); - Restore restore = injector.getInstance(Restore.class); - cal.set(2011, Calendar.AUGUST, 11, 0, 30, 0); - cal.set(Calendar.MILLISECOND, 0); - Date startTime = cal.getTime(); - cal.add(Calendar.HOUR, 5); - restore.restore(startTime, cal.getTime()); - Assert.assertFalse(filesystem.downloadedFiles.contains(fileList.get(0))); - Assert.assertTrue(filesystem.downloadedFiles.contains(metafile)); - Assert.assertTrue(filesystem.downloadedFiles.contains(fileList.get(1))); - Assert.assertTrue(filesystem.downloadedFiles.contains(fileList.get(2))); - Assert.assertTrue(filesystem.downloadedFiles.contains(fileList.get(3))); - Assert.assertFalse(filesystem.downloadedFiles.contains(fileList.get(4))); - Assert.assertFalse(filesystem.downloadedFiles.contains(fileList.get(5))); - } - - @Test - public void testNoSnapshots() throws Exception - { - try { - filesystem.setupTest(new ArrayList()); - Restore restore = injector.getInstance(Restore.class); - cal.set(2011, Calendar.SEPTEMBER, 11, 0, 30); - Date startTime = cal.getTime(); - cal.add(Calendar.HOUR, 5); - restore.restore(startTime, cal.getTime()); - Assert.assertFalse(true);//No exception thrown - } catch (IllegalStateException e) { - //We are ok. No snapshot found. 
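The deleted testNoSnapshots just above hand-rolls its expected-exception check (the Assert.assertFalse(true) fires only when no exception was thrown). When a test cares only that a particular exception type escapes, JUnit 4's annotation form states the intent directly; the throw here is a stand-in for the real restore call over an empty backup filesystem:

    import org.junit.Test;

    public class NoSnapshotTestSketch {
        @Test(expected = IllegalStateException.class)
        public void restoreWithoutSnapshotsFails() {
            // stand-in for restore.restore(startTime, endTime) with no snapshots present
            throw new IllegalStateException("no snapshot found");
        }
    }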
- } catch (Exception e) { - throw e; - } - - } - - - @Test - public void testRestoreFromDiffCluster() throws Exception - { - populateBackupFileSystem("test_backup_new"); - FakeConfiguration conf = (FakeConfiguration)injector.getInstance(IConfiguration.class); - conf.setRestorePrefix("RESTOREBUCKET/test_backup_new/"+FakeConfiguration.FAKE_REGION+"/fakecluster"); - Restore restore = injector.getInstance(Restore.class); - cal.set(2011, Calendar.AUGUST, 11, 0, 30, 0); - cal.set(Calendar.MILLISECOND, 0); - Date startTime = cal.getTime(); - cal.add(Calendar.HOUR, 5); - restore.restore(startTime, cal.getTime()); - Assert.assertTrue(filesystem.downloadedFiles.contains(fileList.get(0))); - Assert.assertTrue(filesystem.downloadedFiles.contains(fileList.get(1))); - Assert.assertTrue(filesystem.downloadedFiles.contains(fileList.get(2))); - Assert.assertTrue(filesystem.downloadedFiles.contains(fileList.get(3))); - Assert.assertFalse(filesystem.downloadedFiles.contains(fileList.get(4))); - Assert.assertFalse(filesystem.downloadedFiles.contains(fileList.get(5))); - conf.setRestorePrefix(""); - } -} diff --git a/priam/src/test/java/com/netflix/priam/backup/TestS3FileSystem.java b/priam/src/test/java/com/netflix/priam/backup/TestS3FileSystem.java index 28d5d861a..0a297433b 100644 --- a/priam/src/test/java/com/netflix/priam/backup/TestS3FileSystem.java +++ b/priam/src/test/java/com/netflix/priam/backup/TestS3FileSystem.java @@ -17,204 +17,250 @@ package com.netflix.priam.backup; +import com.amazonaws.AmazonClientException; +import com.amazonaws.AmazonServiceException; +import com.amazonaws.SdkClientException; +import com.amazonaws.services.s3.AmazonS3; +import com.amazonaws.services.s3.AmazonS3Client; +import com.amazonaws.services.s3.model.*; +import com.amazonaws.services.s3.model.lifecycle.LifecycleFilter; +import com.amazonaws.services.s3.model.lifecycle.LifecyclePrefixPredicate; +import com.google.api.client.util.Preconditions; +import com.google.common.collect.Lists; +import com.google.inject.Guice; +import com.google.inject.Injector; +import com.netflix.priam.aws.DataPart; +import com.netflix.priam.aws.RemoteBackupPath; +import com.netflix.priam.aws.S3FileSystem; +import com.netflix.priam.aws.S3PartUploader; +import com.netflix.priam.backup.AbstractBackupPath.BackupFileType; +import com.netflix.priam.config.IConfiguration; +import com.netflix.priam.identity.config.InstanceInfo; +import com.netflix.priam.merics.BackupMetrics; import java.io.BufferedOutputStream; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Arrays; import java.util.List; - -import com.amazonaws.services.s3.model.*; -import org.junit.Assert; +import java.util.Optional; +import java.util.stream.Collectors; import mockit.Mock; import mockit.MockUp; - +import org.apache.commons.io.FileUtils; import org.junit.AfterClass; +import org.junit.Assert; import org.junit.BeforeClass; -import org.junit.Ignore; import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.amazonaws.AmazonClientException; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3Client; -import com.google.common.collect.Lists; -import com.google.inject.Guice; -import com.google.inject.Injector; -import com.netflix.priam.config.FakeConfiguration; -import com.netflix.priam.aws.DataPart; -import com.netflix.priam.aws.S3BackupPath; -import 
com.netflix.priam.aws.S3FileSystem; -import com.netflix.priam.aws.S3PartUploader; -import com.netflix.priam.backup.AbstractBackupPath.BackupFileType; - -public class TestS3FileSystem -{ +public class TestS3FileSystem { private static Injector injector; private static final Logger logger = LoggerFactory.getLogger(TestS3FileSystem.class); - private static final String FILE_PATH = "target/data/Keyspace1/Standard1/backups/201108082320/Keyspace1-Standard1-ia-1-Data.db"; + private static final File DIR = new File("target/data/KS1/CF1/backups/201108082320/"); + private static BackupMetrics backupMetrics; + private static String region; + private static IConfiguration configuration; + + public TestS3FileSystem() { + if (injector == null) injector = Guice.createInjector(new BRTestModule()); + + if (backupMetrics == null) backupMetrics = injector.getInstance(BackupMetrics.class); + + if (configuration == null) configuration = injector.getInstance(IConfiguration.class); + + InstanceInfo instanceInfo = injector.getInstance(InstanceInfo.class); + region = instanceInfo.getRegion(); + } @BeforeClass - public static void setup() throws InterruptedException, IOException - { - new MockS3PartUploader(); - new MockAmazonS3Client(); - - injector = Guice.createInjector(new BRTestModule()); - - File dir1 = new File("target/data/Keyspace1/Standard1/backups/201108082320"); - if (!dir1.exists()) - dir1.mkdirs(); - File file = new File(FILE_PATH); - long fiveKB = (5L * 1024); - byte b = 8; - BufferedOutputStream bos1 = new BufferedOutputStream(new FileOutputStream(file)); - for (long i = 0; i < fiveKB; i++) - { - bos1.write(b); - } - bos1.close(); + public static void setUp() { + new MockS3PartUploader(); + new MockAmazonS3Client(); + if (!DIR.exists()) DIR.mkdirs(); } @AfterClass - public static void cleanup() - { - File file = new File(FILE_PATH); - file.delete(); + public static void cleanup() throws IOException { + FileUtils.cleanDirectory(DIR); } @Test - public void testFileUpload() throws Exception - { + public void testFileUpload() throws Exception { MockS3PartUploader.setup(); - S3FileSystem fs = injector.getInstance(S3FileSystem.class); - // String snapshotfile = "target/data/Keyspace1/Standard1/backups/201108082320/Keyspace1-Standard1-ia-1-Data.db"; - S3BackupPath backupfile = injector.getInstance(S3BackupPath.class); - backupfile.parseLocal(new File(FILE_PATH), BackupFileType.SNAP); - //fs.upload(backupfile, backupfile.localReader()); - //Assert.assertEquals(1, MockS3PartUploader.compattempts); + AbstractFileSystem fs = injector.getInstance(NullBackupFileSystem.class); + RemoteBackupPath backupfile = injector.getInstance(RemoteBackupPath.class); + backupfile.parseLocal(localFile(), BackupFileType.SNAP); + long noOfFilesUploaded = backupMetrics.getUploadRate().count(); + // temporary hack to allow tests to complete in a timely fashion + // This will be removed once we stop inheriting from AbstractFileSystem + fs.uploadAndDeleteInternal(backupfile, Instant.EPOCH, 0 /* retries */); + Assert.assertEquals(1, backupMetrics.getUploadRate().count() - noOfFilesUploaded); + } + + @Test + public void testFileUploadDeleteExists() throws Exception { + MockS3PartUploader.setup(); + IBackupFileSystem fs = injector.getInstance(NullBackupFileSystem.class); + RemoteBackupPath backupfile = injector.getInstance(RemoteBackupPath.class); + backupfile.parseLocal(localFile(), BackupFileType.SST_V2); + fs.uploadAndDelete(backupfile, false /* async */); + 
Assert.assertTrue(fs.checkObjectExists(Paths.get(backupfile.getRemotePath()))); + // Lets delete the file now. + List deleteFiles = Lists.newArrayList(); + deleteFiles.add(Paths.get(backupfile.getRemotePath())); + fs.deleteRemoteFiles(deleteFiles); + Assert.assertFalse(fs.checkObjectExists(Paths.get(backupfile.getRemotePath()))); } @Test - public void testFileUploadFailures() throws Exception - { + public void testFileUploadFailures() throws Exception { MockS3PartUploader.setup(); MockS3PartUploader.partFailure = true; + long noOfFailures = backupMetrics.getInvalidUploads().count(); S3FileSystem fs = injector.getInstance(S3FileSystem.class); - String snapshotfile = "target/data/Keyspace1/Standard1/backups/201108082320/Keyspace1-Standard1-ia-1-Data.db"; - S3BackupPath backupfile = injector.getInstance(S3BackupPath.class); - backupfile.parseLocal(new File(snapshotfile), BackupFileType.SNAP); - try - { - fs.upload(backupfile, backupfile.localReader()); - } - catch (BackupRestoreException e) - { + RemoteBackupPath backupfile = injector.getInstance(RemoteBackupPath.class); + backupfile.parseLocal(localFile(), BackupFileType.SNAP); + try { + // temporary hack to allow tests to complete in a timely fashion + // This will be removed once we stop inheriting from AbstractFileSystem + fs.uploadAndDeleteInternal(backupfile, Instant.EPOCH, 0 /* retries */); + } catch (BackupRestoreException e) { // ignore } - //Assert.assertEquals(RetryableCallable.DEFAULT_NUMBER_OF_RETRIES, MockS3PartUploader.partAttempts); Assert.assertEquals(0, MockS3PartUploader.compattempts); + Assert.assertEquals(1, backupMetrics.getInvalidUploads().count() - noOfFailures); } @Test - public void testFileUploadCompleteFailure() throws Exception - { + public void testFileUploadCompleteFailure() throws Exception { MockS3PartUploader.setup(); MockS3PartUploader.completionFailure = true; S3FileSystem fs = injector.getInstance(S3FileSystem.class); - String snapshotfile = "target/data/Keyspace1/Standard1/backups/201108082320/Keyspace1-Standard1-ia-1-Data.db"; - S3BackupPath backupfile = injector.getInstance(S3BackupPath.class); - backupfile.parseLocal(new File(snapshotfile), BackupFileType.SNAP); - try - { - fs.upload(backupfile, backupfile.localReader()); - } - catch (BackupRestoreException e) - { + fs.setS3Client(new MockAmazonS3Client().getMockInstance()); + RemoteBackupPath backupfile = injector.getInstance(RemoteBackupPath.class); + backupfile.parseLocal(localFile(), BackupFileType.SNAP); + try { + // temporary hack to allow tests to complete in a timely fashion + // This will be removed once we stop inheriting from AbstractFileSystem + fs.uploadAndDeleteInternal(backupfile, Instant.EPOCH, 0 /* retries */); + } catch (BackupRestoreException e) { // ignore } - //Assert.assertEquals(1, MockS3PartUploader.partAttempts); - // No retries with the new logic - //Assert.assertEquals(1, MockS3PartUploader.compattempts); } @Test - public void testCleanupAdd() throws Exception - { - MockAmazonS3Client.ruleAvailable = false; + public void testCleanupAdd() throws Exception { + MockAmazonS3Client.setRuleAvailable(false); S3FileSystem fs = injector.getInstance(S3FileSystem.class); fs.cleanup(); Assert.assertEquals(1, MockAmazonS3Client.bconf.getRules().size()); BucketLifecycleConfiguration.Rule rule = MockAmazonS3Client.bconf.getRules().get(0); - logger.info(rule.getPrefix()); - Assert.assertEquals("casstestbackup/"+FakeConfiguration.FAKE_REGION+"/fake-app/", rule.getPrefix()); - Assert.assertEquals(5, rule.getExpirationInDays()); + 
Assert.assertEquals("casstestbackup/" + region + "/fake-app/", rule.getId()); + Assert.assertEquals(configuration.getBackupRetentionDays(), rule.getExpirationInDays()); } @Test - public void testCleanupIgnore() throws Exception - { - MockAmazonS3Client.ruleAvailable = true; + public void testCleanupIgnore() throws Exception { + MockAmazonS3Client.setRuleAvailable(true); S3FileSystem fs = injector.getInstance(S3FileSystem.class); fs.cleanup(); Assert.assertEquals(1, MockAmazonS3Client.bconf.getRules().size()); BucketLifecycleConfiguration.Rule rule = MockAmazonS3Client.bconf.getRules().get(0); - logger.info(rule.getPrefix()); - Assert.assertEquals("casstestbackup/"+FakeConfiguration.FAKE_REGION+"/fake-app/", rule.getPrefix()); - Assert.assertEquals(5, rule.getExpirationInDays()); + Assert.assertEquals("casstestbackup/" + region + "/fake-app/", rule.getId()); + Assert.assertEquals(configuration.getBackupRetentionDays(), rule.getExpirationInDays()); + } + + @Test + public void testCleanupUpdate() throws Exception { + MockAmazonS3Client.setRuleAvailable(true); + S3FileSystem fs = injector.getInstance(S3FileSystem.class); + String clusterPrefix = "casstestbackup/" + region + "/fake-app/"; + MockAmazonS3Client.updateRule( + MockAmazonS3Client.getBucketLifecycleConfig(clusterPrefix, 2)); + fs.cleanup(); + Assert.assertEquals(1, MockAmazonS3Client.bconf.getRules().size()); + BucketLifecycleConfiguration.Rule rule = MockAmazonS3Client.bconf.getRules().get(0); + Assert.assertEquals("casstestbackup/" + region + "/fake-app/", rule.getId()); + Assert.assertEquals(configuration.getBackupRetentionDays(), rule.getExpirationInDays()); + } + + @Test + public void testDeleteObjects() throws Exception { + S3FileSystem fs = injector.getInstance(S3FileSystem.class); + List filesToDelete = new ArrayList<>(); + // Empty files + fs.deleteRemoteFiles(filesToDelete); + + // Lets add some random files now. + filesToDelete.add(Paths.get("a.txt")); + fs.deleteRemoteFiles(filesToDelete); + + // Emulate error now. 
+ try { + MockAmazonS3Client.emulateError = true; + fs.deleteRemoteFiles(filesToDelete); + Assert.assertTrue(false); + } catch (BackupRestoreException e) { + Assert.assertTrue(true); + } + } + + private File localFile() throws IOException { + String caller = Thread.currentThread().getStackTrace()[1].getMethodName(); + File file = new File(DIR + caller + "KS1-CF1-ia-1-Data.db"); + if (file.createNewFile()) { + byte[] data = new byte[5 << 10]; + Arrays.fill(data, (byte) 8); + try (BufferedOutputStream os = new BufferedOutputStream(new FileOutputStream(file))) { + os.write(data); + } + } + Preconditions.checkState(file.exists()); + return file; } - // Mock Nodeprobe class - @Ignore - public static class MockS3PartUploader extends MockUp - { - public static int compattempts = 0; - public static int partAttempts = 0; - public static boolean partFailure = false; - public static boolean completionFailure = false; + static class MockS3PartUploader extends MockUp { + static int compattempts = 0; + static int partAttempts = 0; + static boolean partFailure = false; + static boolean completionFailure = false; private static List partETags; @Mock - public void $init(AmazonS3 client, DataPart dp, List partETags) - { + public void $init(AmazonS3 client, DataPart dp, List partETags) { MockS3PartUploader.partETags = partETags; } @Mock - private Void uploadPart() throws AmazonClientException, BackupRestoreException - { + private Void uploadPart() throws AmazonClientException, BackupRestoreException { ++partAttempts; - if (partFailure) - throw new BackupRestoreException("Test exception"); + if (partFailure) throw new BackupRestoreException("Test exception"); partETags.add(new PartETag(0, null)); return null; } @Mock - public CompleteMultipartUploadResult completeUpload() throws BackupRestoreException - { + public CompleteMultipartUploadResult completeUpload() throws BackupRestoreException { ++compattempts; - if (completionFailure) - throw new BackupRestoreException("Test exception"); + if (completionFailure) throw new BackupRestoreException("Test exception"); return null; } @Mock - public void abortUpload() - { - } - - @Mock - public Void retriableCall() throws AmazonClientException, BackupRestoreException - { + public Void retriableCall() throws AmazonClientException, BackupRestoreException { logger.info("MOCK UPLOADING..."); return uploadPart(); } - public static void setup() - { + public static void setup() { compattempts = 0; partAttempts = 0; partFailure = false; @@ -222,43 +268,84 @@ public static void setup() } } - @Ignore - public static class MockAmazonS3Client extends MockUp - { - public static boolean ruleAvailable = false; - public static BucketLifecycleConfiguration bconf = new BucketLifecycleConfiguration(); - @Mock - public void $init() - { - } + static class MockAmazonS3Client extends MockUp { + private boolean ruleAvailable = false; + static BucketLifecycleConfiguration bconf; + static boolean emulateError = false; @Mock - public InitiateMultipartUploadResult initiateMultipartUpload(InitiateMultipartUploadRequest initiateMultipartUploadRequest) throws AmazonClientException { + public InitiateMultipartUploadResult initiateMultipartUpload( + InitiateMultipartUploadRequest initiateMultipartUploadRequest) + throws AmazonClientException { return new InitiateMultipartUploadResult(); } - + + public PutObjectResult putObject(PutObjectRequest putObjectRequest) + throws SdkClientException { + PutObjectResult result = new PutObjectResult(); + result.setETag("ad"); + return result; + } + @Mock - public 
BucketLifecycleConfiguration getBucketLifecycleConfiguration(String bucketName) - { - List rules = Lists.newArrayList(); - if( ruleAvailable ) - { - String clusterPath = "casstestbackup/"+FakeConfiguration.FAKE_REGION+"/fake-app/"; - BucketLifecycleConfiguration.Rule rule = new BucketLifecycleConfiguration.Rule().withExpirationInDays(5).withPrefix(clusterPath); - rule.setStatus(BucketLifecycleConfiguration.ENABLED); - rule.setId(clusterPath); - rules.add(rule); - - } - bconf.setRules(rules); + public BucketLifecycleConfiguration getBucketLifecycleConfiguration(String bucketName) { return bconf; } - + @Mock - public void setBucketLifecycleConfiguration(String bucketName, BucketLifecycleConfiguration bucketLifecycleConfiguration) - { + public void setBucketLifecycleConfiguration( + String bucketName, BucketLifecycleConfiguration bucketLifecycleConfiguration) { bconf = bucketLifecycleConfiguration; } + @Mock + public DeleteObjectsResult deleteObjects(DeleteObjectsRequest var1) + throws SdkClientException, AmazonServiceException { + if (emulateError) throw new AmazonServiceException("Unable to reach AWS"); + return null; + } + + static BucketLifecycleConfiguration.Rule getBucketLifecycleConfig( + String prefix, int expirationDays) { + return new BucketLifecycleConfiguration.Rule() + .withExpirationInDays(expirationDays) + .withFilter(new LifecycleFilter(new LifecyclePrefixPredicate(prefix))) + .withStatus(BucketLifecycleConfiguration.ENABLED) + .withId(prefix); + } + + static void setRuleAvailable(boolean ruleAvailable) { + if (ruleAvailable) { + bconf = new BucketLifecycleConfiguration(); + if (bconf.getRules() == null) bconf.setRules(Lists.newArrayList()); + + List rules = bconf.getRules(); + String clusterPath = "casstestbackup/" + region + "/fake-app/"; + + List potentialRules = + rules.stream() + .filter(rule -> rule.getId().equalsIgnoreCase(clusterPath)) + .collect(Collectors.toList()); + if (potentialRules == null || potentialRules.isEmpty()) + rules.add( + getBucketLifecycleConfig( + clusterPath, configuration.getBackupRetentionDays())); + } + } + + static void updateRule(BucketLifecycleConfiguration.Rule updatedRule) { + List rules = bconf.getRules(); + Optional updateRule = + rules.stream() + .filter(rule -> rule.getId().equalsIgnoreCase(updatedRule.getId())) + .findFirst(); + if (updateRule.isPresent()) { + rules.remove(updateRule.get()); + rules.add(updatedRule); + } else { + rules.add(updatedRule); + } + bconf.setRules(rules); + } } -} \ No newline at end of file +} diff --git a/priam/src/test/java/com/netflix/priam/backup/TestSnapshotStatusMgr.java b/priam/src/test/java/com/netflix/priam/backup/TestSnapshotStatusMgr.java deleted file mode 100644 index a04fd190c..000000000 --- a/priam/src/test/java/com/netflix/priam/backup/TestSnapshotStatusMgr.java +++ /dev/null @@ -1,152 +0,0 @@ -/* - * Copyright 2017 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
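The file being deleted below, TestSnapshotStatusMgr, checked two invariants of the status manager: snapshot history is kept newest-first, and it is capped at a fixed capacity. A tiny stand-in structure exhibiting both invariants (illustrative names, not Priam's API):

    import java.util.ArrayDeque;
    import java.util.Deque;

    class BoundedHistory<T> {
        private final Deque<T> entries = new ArrayDeque<>();
        private final int capacity;

        BoundedHistory(int capacity) {
            this.capacity = capacity;
        }

        void add(T entry) {
            entries.addFirst(entry); // newest-first ordering
            if (entries.size() > capacity) {
                entries.removeLast(); // evict the eldest once over capacity
            }
        }

        Deque<T> view() {
            return entries;
        }
    }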
- * - */ - -package com.netflix.priam.backup; - -import com.google.inject.Guice; -import com.google.inject.Injector; -import com.netflix.priam.config.IConfiguration; -import com.netflix.priam.utils.DateUtil; -import junit.framework.Assert; -import org.joda.time.DateTime; -import org.junit.BeforeClass; -import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.util.Date; -import java.util.List; - -/** - * Created by aagrawal on 7/11/17. - */ -public class TestSnapshotStatusMgr { - private static final Logger logger = LoggerFactory.getLogger(TestSnapshotStatusMgr.class); - - private static IBackupStatusMgr backupStatusMgr; - - @BeforeClass - public static void setup() { - Injector injector = Guice.createInjector(new BRTestModule()); - //cleanup old saved file, if any - IConfiguration configuration = injector.getInstance(IConfiguration.class); - File f = new File(configuration.getBackupStatusFileLoc()); - if (f.exists()) - f.delete(); - - backupStatusMgr = injector.getInstance(IBackupStatusMgr.class); - } - - @Test - public void testSnapshotStatusAddFinish() throws Exception - { - Date startTime = DateUtil.getDate("198407110720"); - - BackupMetadata backupMetadata = new BackupMetadata("123", startTime); - backupStatusMgr.start(backupMetadata); - List<BackupMetadata> metadataList = backupStatusMgr.locate(startTime); - Assert.assertNotNull(metadataList); - Assert.assertTrue(!metadataList.isEmpty()); - Assert.assertEquals(1, metadataList.size()); - Assert.assertEquals(startTime, metadataList.get(0).getStart()); - logger.info("Snapshot start: {}", metadataList.get(0)); - - backupStatusMgr.finish(backupMetadata); - metadataList = backupStatusMgr.locate(startTime); - Assert.assertNotNull(metadataList); - Assert.assertTrue(!metadataList.isEmpty()); - Assert.assertEquals(1, metadataList.size()); - Assert.assertEquals(Status.FINISHED, metadataList.get(0).getStatus()); - Assert.assertTrue(metadataList.get(0).getCompleted() != null); - logger.info("Snapshot finished: {}", metadataList.get(0)); - } - - @Test - public void testSnapshotStatusAddFailed() throws Exception - { - Date startTime = DateUtil.getDate("198407120720"); - - BackupMetadata backupMetadata = new BackupMetadata("123", startTime); - backupStatusMgr.start(backupMetadata); - List<BackupMetadata> metadataList = backupStatusMgr.locate(startTime); - Assert.assertNotNull(metadataList); - Assert.assertTrue(!metadataList.isEmpty()); - Assert.assertEquals(1, metadataList.size()); - Assert.assertEquals(startTime, metadataList.get(0).getStart()); - logger.info("Snapshot start: {}", metadataList.get(0)); - - backupStatusMgr.failed(backupMetadata); - metadataList = backupStatusMgr.locate(startTime); - Assert.assertNotNull(metadataList); - Assert.assertTrue(!metadataList.isEmpty()); - Assert.assertEquals(1, metadataList.size()); - Assert.assertEquals(Status.FAILED, metadataList.get(0).getStatus()); - Assert.assertTrue(metadataList.get(0).getCompleted() != null); - logger.info("Snapshot failed: {}", metadataList.get(0)); - } - - @Test - public void testSnapshotStatusMultiAddFinishInADay() throws Exception - { - final int noOfEntries = 10; - Date startTime = DateUtil.getDate("19840101"); - - for (int i = 0 ; i < noOfEntries; i++) - { - Date time = new DateTime(startTime.getTime()).plusHours(i).toDate(); - BackupMetadata backupMetadata = new BackupMetadata("123", time); - backupStatusMgr.start(backupMetadata); - backupStatusMgr.finish(backupMetadata); - } - - List<BackupMetadata> metadataList = backupStatusMgr.locate(startTime); -
Assert.assertEquals(noOfEntries, metadataList.size()); - logger.info(metadataList.toString()); - - //Ensure that list is always maintained from latest to eldest - Date latest = null; - for (BackupMetadata backupMetadata: metadataList) { - if (latest == null) - latest = backupMetadata.getStart(); - else - { - Assert.assertTrue(backupMetadata.getStart().before(latest)); - latest = backupMetadata.getStart(); - } - } - } - - @Test - public void testSnapshotStatusSize() throws Exception - { - final int noOfEntries = backupStatusMgr.getCapacity() + 1; - Date startTime = DateUtil.getDate("19850101"); - - for (int i = 0 ; i < noOfEntries; i++) - { - Date time = new DateTime(startTime.getTime()).plusDays(i).toDate(); - BackupMetadata backupMetadata = new BackupMetadata("123", time); - backupStatusMgr.start(backupMetadata); - backupStatusMgr.finish(backupMetadata); - } - - // Verify there is only capacity entries - Assert.assertEquals(backupStatusMgr.getCapacity(), backupStatusMgr.getAllSnapshotStatus().size()); - } - -} diff --git a/priam/src/test/java/com/netflix/priam/backup/identity/DoubleRingTest.java b/priam/src/test/java/com/netflix/priam/backup/identity/DoubleRingTest.java index 8366a454b..74593f3c6 100644 --- a/priam/src/test/java/com/netflix/priam/backup/identity/DoubleRingTest.java +++ b/priam/src/test/java/com/netflix/priam/backup/identity/DoubleRingTest.java @@ -17,59 +17,48 @@ package com.netflix.priam.backup.identity; -import java.util.List; - -import org.junit.Test; +import static org.junit.Assert.assertEquals; +import com.google.common.collect.ImmutableSet; import com.google.common.collect.Lists; import com.netflix.priam.identity.DoubleRing; import com.netflix.priam.identity.InstanceIdentity; import com.netflix.priam.identity.PriamInstance; +import java.util.List; +import org.junit.Test; -import static org.junit.Assert.assertEquals; - -public class DoubleRingTest extends InstanceTestUtils -{ +public class DoubleRingTest extends InstanceTestUtils { @Test - public void testDouble() throws Exception - { + public void testDouble() throws Exception { createInstances(); int originalSize = factory.getAllIds(config.getAppName()).size(); - new DoubleRing(config, factory, tokenManager).doubleSlots(); - List<PriamInstance> doubled = factory.getAllIds(config.getAppName()); - factory.sort(doubled); - + new DoubleRing(config, factory, tokenManager, instanceInfo).doubleSlots(); + ImmutableSet<PriamInstance> doubled = factory.getAllIds(config.getAppName()); assertEquals(originalSize * 2, doubled.size()); - validate(doubled); + validate(doubled.asList()); } - private void validate(List<PriamInstance> doubled) - { + private void validate(List<PriamInstance> doubled) { List<String> validator = Lists.newArrayList(); - for (int i = 0; i < doubled.size(); i++) - { - validator.add(tokenManager.createToken(i, doubled.size(), config.getDC())); - + for (int i = 0; i < doubled.size(); i++) { + validator.add(tokenManager.createToken(i, doubled.size(), instanceInfo.getRegion())); } - - for (int i = 0; i < doubled.size(); i++) - { + + for (int i = 0; i < doubled.size(); i++) { PriamInstance ins = doubled.get(i); assertEquals(validator.get(i), ins.getToken()); - int id = ins.getId() - tokenManager.regionOffset(config.getDC()); + int id = ins.getId() - tokenManager.regionOffset(instanceInfo.getRegion()); System.out.println(ins); - if (0 != id % 2) - assertEquals(ins.getInstanceId(), InstanceIdentity.DUMMY_INSTANCE_ID); + if (0 != id % 2) assertEquals(ins.getInstanceId(), InstanceIdentity.DUMMY_INSTANCE_ID); } } @Test - public void testBR() throws Exception - { + public void
testBR() throws Exception { createInstances(); int intialSize = factory.getAllIds(config.getAppName()).size(); - DoubleRing ring = new DoubleRing(config, factory, tokenManager); + DoubleRing ring = new DoubleRing(config, factory, tokenManager, instanceInfo); ring.backup(); ring.doubleSlots(); assertEquals(intialSize * 2, factory.getAllIds(config.getAppName()).size()); diff --git a/priam/src/test/java/com/netflix/priam/backup/identity/FakeInstanceEnvIdentity.java b/priam/src/test/java/com/netflix/priam/backup/identity/FakeInstanceEnvIdentity.java deleted file mode 100644 index a9ef419c1..000000000 --- a/priam/src/test/java/com/netflix/priam/backup/identity/FakeInstanceEnvIdentity.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright 2017 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.priam.backup.identity; - -import com.netflix.priam.identity.InstanceEnvIdentity; - -public class FakeInstanceEnvIdentity implements InstanceEnvIdentity { - - @Override - public Boolean isClassic() { - return null; - } - - @Override - public Boolean isDefaultVpc() { - return null; - } - - @Override - public Boolean isNonDefaultVpc() { - return null; - } - -} \ No newline at end of file diff --git a/priam/src/test/java/com/netflix/priam/backup/identity/InstanceIdentityTest.java b/priam/src/test/java/com/netflix/priam/backup/identity/InstanceIdentityTest.java index 203fcf86f..0b2ad7f20 100644 --- a/priam/src/test/java/com/netflix/priam/backup/identity/InstanceIdentityTest.java +++ b/priam/src/test/java/com/netflix/priam/backup/identity/InstanceIdentityTest.java @@ -17,25 +17,21 @@ package com.netflix.priam.backup.identity; +import static org.junit.Assert.*; + +import com.google.common.collect.ImmutableList; import com.netflix.priam.identity.DoubleRing; import com.netflix.priam.identity.InstanceIdentity; import com.netflix.priam.identity.PriamInstance; - import org.junit.Test; -import java.util.List; - -import static org.junit.Assert.assertEquals; - -public class InstanceIdentityTest extends InstanceTestUtils -{ +public class InstanceIdentityTest extends InstanceTestUtils { @Test - public void testCreateToken() throws Exception - { + public void testCreateToken() throws Exception { identity = createInstanceIdentity("az1", "fakeinstance1"); - int hash = tokenManager.regionOffset(config.getDC()); + int hash = tokenManager.regionOffset(instanceInfo.getRegion()); assertEquals(0, identity.getInstance().getId() - hash); identity = createInstanceIdentity("az1", "fakeinstance2"); @@ -44,7 +40,7 @@ public void testCreateToken() throws Exception identity = createInstanceIdentity("az1", "fakeinstance3"); assertEquals(6, identity.getInstance().getId() - hash); - // try next region + // try next zone identity = createInstanceIdentity("az2", "fakeinstance4"); assertEquals(1, identity.getInstance().getId() - hash); @@ -64,51 +60,60 @@ public void testCreateToken() throws Exception identity = createInstanceIdentity("az3", "fakeinstance9"); assertEquals(8, 
identity.getInstance().getId() - hash); } - + @Test - public void testGetSeeds() throws Exception - { - createInstances(); - identity = createInstanceIdentity("az1", "fakeinstance1"); - assertEquals(3, identity.getSeeds().size()); + public void testGetSeedsAutobootstrapTrue() throws Exception { + boolean previous = (Boolean) config.getFakeConfig("auto_bootstrap"); + try { + config.setFakeConfig("auto_bootstrap", true); + createInstances(); + identity = createInstanceIdentity("az1", "fakeinstance1"); + assertEquals(3, identity.getSeeds().size()); + assertFalse(identity.getSeeds().contains("fakeinstance1")); + } finally { + config.setFakeConfig("auto_bootstrap", previous); + } } @Test - public void testDoubleSlots() throws Exception - { + public void testGetSeedsAutobootstrapFalse() throws Exception { + boolean previous = (Boolean) config.getFakeConfig("auto_bootstrap"); + try { + config.setFakeConfig("auto_bootstrap", false); + createInstances(); + identity = createInstanceIdentity("az1", "fakeinstance1"); + assertEquals(3, identity.getSeeds().size()); + assertTrue(identity.getSeeds().contains("fakeinstance1")); + } finally { + config.setFakeConfig("auto_bootstrap", previous); + } + } + + @Test + public void testDoubleSlots() throws Exception { createInstances(); - int before = factory.getAllIds("fake-app").size(); - new DoubleRing(config, factory, tokenManager).doubleSlots(); - List<PriamInstance> lst = factory.getAllIds(config.getAppName()); - // sort it so it will look good if you want to print it. - factory.sort(lst); - for (int i = 0; i < lst.size(); i++) - { + int before = factory.getAllIds(config.getAppName()).size(); + new DoubleRing(config, factory, tokenManager, instanceInfo).doubleSlots(); + ImmutableList<PriamInstance> lst = factory.getAllIds(config.getAppName()).asList(); + for (int i = 0; i < lst.size(); i++) { System.out.println(lst.get(i)); - if (0 == i % 2) - continue; + if (0 == i % 2) continue; assertEquals(InstanceIdentity.DUMMY_INSTANCE_ID, lst.get(i).getInstanceId()); } assertEquals(before * 2, lst.size()); } @Test - public void testDoubleGrap() throws Exception - { + public void testDoubleGrap() throws Exception { createInstances(); - new DoubleRing(config, factory, tokenManager).doubleSlots(); - config.zone = "az1"; - config.instance_id = "fakeinstancex"; - int hash = tokenManager.regionOffset(config.getDC()); + new DoubleRing(config, factory, tokenManager, instanceInfo).doubleSlots(); + int hash = tokenManager.regionOffset(instanceInfo.getRegion()); identity = createInstanceIdentity("az1", "fakeinstancex"); printInstance(identity.getInstance(), hash); } - public void printInstance(PriamInstance ins, int hash) - { + public void printInstance(PriamInstance ins, int hash) { System.out.println("ID: " + (ins.getId() - hash)); System.out.println("PayLoad: " + ins.getToken()); - } - } diff --git a/priam/src/test/java/com/netflix/priam/backup/identity/InstanceTestUtils.java b/priam/src/test/java/com/netflix/priam/backup/identity/InstanceTestUtils.java index ed9b14677..1dca57e38 100644 --- a/priam/src/test/java/com/netflix/priam/backup/identity/InstanceTestUtils.java +++ b/priam/src/test/java/com/netflix/priam/backup/identity/InstanceTestUtils.java @@ -18,45 +18,35 @@ package com.netflix.priam.backup.identity; import com.netflix.priam.config.FakeConfiguration; -import com.netflix.priam.identity.FakeMembership; -import com.netflix.priam.identity.FakePriamInstanceFactory; -import com.netflix.priam.identity.IMembership; -import com.netflix.priam.identity.IPriamInstanceFactory; -import
com.netflix.priam.identity.InstanceEnvIdentity; -import com.netflix.priam.identity.InstanceIdentity; -import com.netflix.priam.identity.token.DeadTokenRetriever; -import com.netflix.priam.identity.token.NewTokenRetriever; -import com.netflix.priam.identity.token.PreGeneratedTokenRetriever; +import com.netflix.priam.identity.*; +import com.netflix.priam.identity.config.FakeInstanceInfo; +import com.netflix.priam.identity.config.InstanceInfo; +import com.netflix.priam.identity.token.ITokenRetriever; +import com.netflix.priam.identity.token.TokenRetriever; import com.netflix.priam.utils.FakeSleeper; import com.netflix.priam.utils.ITokenManager; import com.netflix.priam.utils.Sleeper; import com.netflix.priam.utils.TokenManager; - -import org.junit.Before; -import org.junit.Ignore; - import java.util.ArrayList; import java.util.List; +import org.junit.Before; +import org.junit.Ignore; @Ignore -public abstract class InstanceTestUtils -{ +public abstract class InstanceTestUtils { - List<String> instances = new ArrayList<String>(); - IMembership membership; + private final List<String> instances = new ArrayList<>(); + private IMembership membership; FakeConfiguration config; IPriamInstanceFactory<PriamInstance> factory; InstanceIdentity identity; - Sleeper sleeper; - DeadTokenRetriever deadTokenRetriever; - PreGeneratedTokenRetriever preGeneratedTokenRetriever; - NewTokenRetriever newTokenRetriever; - ITokenManager tokenManager; - InstanceEnvIdentity insEnvIdentity; + private Sleeper sleeper; + ITokenManager tokenManager; + InstanceInfo instanceInfo; + private final String region = "us-east-1"; @Before - public void setup() - { + public void setup() throws Exception { instances.add("fakeinstance1"); instances.add("fakeinstance2"); instances.add("fakeinstance3"); @@ -68,38 +58,33 @@ public void setup() instances.add("fakeinstance9"); membership = new FakeMembership(instances); - config = new FakeConfiguration("fake", "fake-app", "az1", "fakeinstance1"); + config = new FakeConfiguration("fake-app"); + instanceInfo = new FakeInstanceInfo("fakeinstance1", "az1", region); tokenManager = new TokenManager(config); - factory = new FakePriamInstanceFactory(config); + factory = new FakePriamInstanceFactory(instanceInfo); sleeper = new FakeSleeper(); - this.deadTokenRetriever = new DeadTokenRetriever(factory, membership, config, sleeper, insEnvIdentity); - this.preGeneratedTokenRetriever = new PreGeneratedTokenRetriever(factory, membership, config, sleeper); - this.newTokenRetriever = new NewTokenRetriever(factory, membership, config, sleeper, tokenManager); + identity = createInstanceIdentity(instanceInfo.getRac(), instanceInfo.getInstanceId()); } - public void createInstances() throws Exception - { + public void createInstances() throws Exception { createInstanceIdentity("az1", "fakeinstance1"); createInstanceIdentity("az1", "fakeinstance2"); createInstanceIdentity("az1", "fakeinstance3"); - // try next region + // try next zone createInstanceIdentity("az2", "fakeinstance4"); createInstanceIdentity("az2", "fakeinstance5"); createInstanceIdentity("az2", "fakeinstance6"); - // next region + // next zone createInstanceIdentity("az3", "fakeinstance7"); createInstanceIdentity("az3", "fakeinstance8"); createInstanceIdentity("az3", "fakeinstance9"); } - protected InstanceIdentity createInstanceIdentity(String zone, String instanceId) throws Exception - { - config.zone = zone; - config.instance_id = instanceId; - return new InstanceIdentity(factory, membership, config, sleeper, new TokenManager(config) - , this.deadTokenRetriever - ,
this.preGeneratedTokenRetriever - , this.newTokenRetriever - ); + InstanceIdentity createInstanceIdentity(String zone, String instanceId) throws Exception { + InstanceInfo newInstanceInfo = new FakeInstanceInfo(instanceId, zone, region); + ITokenRetriever tokenRetriever = + new TokenRetriever( + factory, membership, config, newInstanceInfo, sleeper, tokenManager); + return new InstanceIdentity(factory, membership, config, newInstanceInfo, tokenRetriever); } } diff --git a/priam/src/test/java/com/netflix/priam/backup/identity/token/FakeDeadTokenRetriever.java b/priam/src/test/java/com/netflix/priam/backup/identity/token/FakeDeadTokenRetriever.java deleted file mode 100755 index 9c213e6b4..000000000 --- a/priam/src/test/java/com/netflix/priam/backup/identity/token/FakeDeadTokenRetriever.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2017 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.priam.backup.identity.token; - -import com.google.common.collect.ListMultimap; -import com.netflix.priam.identity.PriamInstance; -import com.netflix.priam.identity.token.IDeadTokenRetriever; - -public class FakeDeadTokenRetriever implements IDeadTokenRetriever { - - @Override - public PriamInstance get() throws Exception { - // TODO Auto-generated method stub - return null; - } - - @Override - public String getReplaceIp() { - // TODO Auto-generated method stub - return null; - } - - @Override - public void setLocMap(ListMultimap<String, PriamInstance> locMap) { - // TODO Auto-generated method stub - - } - -} diff --git a/priam/src/test/java/com/netflix/priam/backup/identity/token/FakeNewTokenRetriever.java b/priam/src/test/java/com/netflix/priam/backup/identity/token/FakeNewTokenRetriever.java deleted file mode 100755 index 7f9f300d5..000000000 --- a/priam/src/test/java/com/netflix/priam/backup/identity/token/FakeNewTokenRetriever.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright 2017 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- * - */ - -package com.netflix.priam.backup.identity.token; - -import com.google.common.collect.ListMultimap; -import com.netflix.priam.identity.PriamInstance; -import com.netflix.priam.identity.token.INewTokenRetriever; - -public class FakeNewTokenRetriever implements INewTokenRetriever { - - @Override - public PriamInstance get() throws Exception { - // TODO Auto-generated method stub - return null; - } - - @Override - public void setLocMap(ListMultimap<String, PriamInstance> locMap) { - // TODO Auto-generated method stub - - } - -} diff --git a/priam/src/test/java/com/netflix/priam/backup/identity/token/FakePreGeneratedTokenRetriever.java b/priam/src/test/java/com/netflix/priam/backup/identity/token/FakePreGeneratedTokenRetriever.java deleted file mode 100755 index 48dc8450d..000000000 --- a/priam/src/test/java/com/netflix/priam/backup/identity/token/FakePreGeneratedTokenRetriever.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright 2017 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.netflix.priam.backup.identity.token; - -import com.google.common.collect.ListMultimap; -import com.netflix.priam.identity.PriamInstance; -import com.netflix.priam.identity.token.IPreGeneratedTokenRetriever; - -public class FakePreGeneratedTokenRetriever implements - IPreGeneratedTokenRetriever { - - @Override - public PriamInstance get() throws Exception { - // TODO Auto-generated method stub - return null; - } - - @Override - public void setLocMap(ListMultimap<String, PriamInstance> locMap) { - // TODO Auto-generated method stub - - } - -} diff --git a/priam/src/test/java/com/netflix/priam/backupv2/TestBackupTTLTask.java b/priam/src/test/java/com/netflix/priam/backupv2/TestBackupTTLTask.java new file mode 100644 index 000000000..74577b11f --- /dev/null +++ b/priam/src/test/java/com/netflix/priam/backupv2/TestBackupTTLTask.java @@ -0,0 +1,209 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +package com.netflix.priam.backupv2; + +import com.google.inject.Guice; +import com.google.inject.Injector; +import com.google.inject.Provider; +import com.netflix.priam.backup.AbstractBackupPath; +import com.netflix.priam.backup.BRTestModule; +import com.netflix.priam.backup.FakeBackupFileSystem; +import com.netflix.priam.backup.Status; +import com.netflix.priam.config.IConfiguration; +import com.netflix.priam.health.InstanceState; +import com.netflix.priam.utils.BackupFileUtils; +import com.netflix.priam.utils.DateUtil; +import java.io.File; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.text.ParseException; +import java.time.Instant; +import java.time.temporal.ChronoUnit; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import mockit.Expectations; +import mockit.Mocked; +import org.junit.After; +import org.junit.Assert; +import org.junit.Test; + +/** Created by aagrawal on 12/17/18. */ +public class TestBackupTTLTask { + + private TestBackupUtils testBackupUtils = new TestBackupUtils(); + private IConfiguration configuration; + private static BackupTTLTask backupTTLService; + private static FakeBackupFileSystem backupFileSystem; + private Provider<AbstractBackupPath> pathProvider; + private Path[] metas; + private Map<String, String> allFilesMap = new HashMap<>(); + + public TestBackupTTLTask() { + Injector injector = Guice.createInjector(new BRTestModule()); + configuration = injector.getInstance(IConfiguration.class); + if (backupTTLService == null) backupTTLService = injector.getInstance(BackupTTLTask.class); + if (backupFileSystem == null) + backupFileSystem = injector.getInstance(FakeBackupFileSystem.class); + pathProvider = injector.getProvider(AbstractBackupPath.class); + } + + public void prepTest(int daysForSnapshot) throws Exception { + BackupFileUtils.cleanupDir(Paths.get(configuration.getDataFileLocation())); + Instant current = DateUtil.getInstant(); + + List<String> list = new ArrayList<>(); + List<String> allFiles = new ArrayList<>(); + metas = new Path[3]; + + Instant time = + current.minus( + daysForSnapshot + configuration.getGracePeriodDaysForCompaction() + 1, + ChronoUnit.DAYS); + String file1 = testBackupUtils.createFile("mc-1-Data.db", time); + String file2 = + testBackupUtils.createFile("mc-2-Data.db", time.plus(10, ChronoUnit.MINUTES)); + list.clear(); + list.add(getRemoteFromLocal(file1)); + list.add(getRemoteFromLocal(file2)); + metas[0] = testBackupUtils.createMeta(list, time.plus(20, ChronoUnit.MINUTES)); + allFiles.add(getRemoteFromLocal(file1)); + allFiles.add(getRemoteFromLocal(file2)); + + time = current.minus(daysForSnapshot, ChronoUnit.DAYS); + String file3 = testBackupUtils.createFile("mc-3-Data.db", time); + String file4 = + testBackupUtils.createFile("mc-4-Data.db", time.plus(10, ChronoUnit.MINUTES)); + list.clear(); + list.add(getRemoteFromLocal(file1)); + list.add(getRemoteFromLocal(file4)); + metas[1] = testBackupUtils.createMeta(list, time.plus(20, ChronoUnit.MINUTES)); + allFiles.add(getRemoteFromLocal(file3)); + allFiles.add(getRemoteFromLocal(file4)); + + time = current.minus(daysForSnapshot - 1, ChronoUnit.DAYS); + String file5 = testBackupUtils.createFile("mc-5-Data.db", time); + String file6 = + testBackupUtils.createFile("mc-6-Data.db", time.plus(10, ChronoUnit.MINUTES)); + String file7 = + testBackupUtils.createFile("mc-7-Data.db", time.plus(20, ChronoUnit.MINUTES)); + list.clear(); + list.add(getRemoteFromLocal(file4)); + // list.add(getRemoteFromLocal(file6)); + list.add(getRemoteFromLocal(file7)); +
metas[2] = testBackupUtils.createMeta(list, time.plus(40, ChronoUnit.MINUTES)); + allFiles.add(getRemoteFromLocal(file5)); + allFiles.add(getRemoteFromLocal(file6)); + allFiles.add(getRemoteFromLocal(file7)); + + allFiles.stream() + .forEach( + file -> { + Path path = Paths.get(file); + allFilesMap.put(path.toFile().getName(), file); + }); + + for (int i = 0; i < metas.length; i++) { + AbstractBackupPath path = pathProvider.get(); + path.parseLocal(metas[i].toFile(), AbstractBackupPath.BackupFileType.META_V2); + allFiles.add(path.getRemotePath()); + allFilesMap.put("META" + i, path.getRemotePath()); + } + + backupFileSystem.setupTest(allFiles); + } + + private String getRemoteFromLocal(String localPath) throws ParseException { + AbstractBackupPath path = pathProvider.get(); + path.parseLocal(new File(localPath), AbstractBackupPath.BackupFileType.SST_V2); + return path.getRemotePath(); + } + + @After + public void cleanup() { + BackupFileUtils.cleanupDir(Paths.get(configuration.getDataFileLocation())); + backupFileSystem.cleanup(); + } + + private List<String> getAllFiles() { + List<String> remoteFiles = new ArrayList<>(); + backupFileSystem.listFileSystem("", null, null).forEachRemaining(remoteFiles::add); + return remoteFiles; + } + + @Test + public void testTTL() throws Exception { + int daysForSnapshot = configuration.getBackupRetentionDays(); + prepTest(daysForSnapshot); + // Run ttl till 2nd meta file. + backupTTLService.execute(); + + List<String> remoteFiles = getAllFiles(); + + // Confirm the files. + Assert.assertEquals(8, remoteFiles.size()); + Assert.assertTrue(remoteFiles.contains(allFilesMap.get("mc-4-Data.db"))); + Assert.assertTrue(remoteFiles.contains(allFilesMap.get("mc-5-Data.db"))); + Assert.assertTrue(remoteFiles.contains(allFilesMap.get("mc-6-Data.db"))); + Assert.assertTrue(remoteFiles.contains(allFilesMap.get("mc-7-Data.db"))); + Assert.assertTrue(remoteFiles.contains(allFilesMap.get("mc-1-Data.db"))); + Assert.assertTrue(remoteFiles.contains(allFilesMap.get("META1"))); + Assert.assertTrue(remoteFiles.contains(allFilesMap.get("META2"))); + // Remains because of GRACE PERIOD. + Assert.assertTrue(remoteFiles.contains(allFilesMap.get("mc-3-Data.db"))); + + Assert.assertFalse(remoteFiles.contains(allFilesMap.get("mc-2-Data.db"))); + Assert.assertFalse(remoteFiles.contains(allFilesMap.get("META0"))); + } + + @Test + public void testTTLNext() throws Exception { + int daysForSnapshot = configuration.getBackupRetentionDays() + 1; + prepTest(daysForSnapshot); + // Run ttl till 3rd meta file. + backupTTLService.execute(); + + List<String> remoteFiles = getAllFiles(); + // Confirm the files. + Assert.assertEquals(6, remoteFiles.size()); + Assert.assertTrue(remoteFiles.contains(allFilesMap.get("mc-4-Data.db"))); + Assert.assertTrue(remoteFiles.contains(allFilesMap.get("mc-6-Data.db"))); + Assert.assertTrue(remoteFiles.contains(allFilesMap.get("mc-7-Data.db"))); + Assert.assertTrue(remoteFiles.contains(allFilesMap.get("META2"))); + // GRACE PERIOD files.
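+ // mc-3 and mc-5 are no longer referenced by any surviving meta file, but their + // timestamps still fall within getGracePeriodDaysForCompaction() of now, so the + // TTL pass keeps them rather than delete files an in-flight compaction or + // backup may still need.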
+ Assert.assertTrue(remoteFiles.contains(allFilesMap.get("mc-3-Data.db"))); + Assert.assertTrue(remoteFiles.contains(allFilesMap.get("mc-5-Data.db"))); + + Assert.assertFalse(remoteFiles.contains(allFilesMap.get("mc-1-Data.db"))); + Assert.assertFalse(remoteFiles.contains(allFilesMap.get("mc-2-Data.db"))); + Assert.assertFalse(remoteFiles.contains(allFilesMap.get("META0"))); + Assert.assertFalse(remoteFiles.contains(allFilesMap.get("META1"))); + } + + @Test + public void testRestoreMode(@Mocked InstanceState state) throws Exception { + new Expectations() { + { + state.getRestoreStatus().getStatus(); + result = Status.STARTED; + } + }; + backupTTLService.execute(); + } +} diff --git a/priam/src/test/java/com/netflix/priam/backupv2/TestBackupUtils.java b/priam/src/test/java/com/netflix/priam/backupv2/TestBackupUtils.java new file mode 100644 index 000000000..e14253a6f --- /dev/null +++ b/priam/src/test/java/com/netflix/priam/backupv2/TestBackupUtils.java @@ -0,0 +1,82 @@ +/* + * Copyright 2018 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.priam.backupv2; + +import com.google.common.collect.ImmutableSetMultimap; +import com.google.inject.Guice; +import com.google.inject.Inject; +import com.google.inject.Injector; +import com.google.inject.Provider; +import com.netflix.priam.backup.AbstractBackupPath; +import com.netflix.priam.backup.BRTestModule; +import com.netflix.priam.config.IConfiguration; +import java.io.FileWriter; +import java.io.IOException; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.time.Instant; +import java.util.List; +import org.apache.commons.io.FileUtils; + +/** Created by aagrawal on 12/17/18. */ +public class TestBackupUtils { + private MetaFileWriterBuilder metaFileWriterBuilder; + private Provider<AbstractBackupPath> pathProvider; + protected final String keyspace = "keyspace1"; + protected final String columnfamily = "columnfamily1"; + private final String dataDir; + + @Inject + public TestBackupUtils() { + Injector injector = Guice.createInjector(new BRTestModule()); + pathProvider = injector.getProvider(AbstractBackupPath.class); + metaFileWriterBuilder = injector.getInstance(MetaFileWriterBuilder.class); + dataDir = injector.getInstance(IConfiguration.class).getDataFileLocation(); + } + + public Path createMeta(List<String> filesToAdd, Instant snapshotTime) throws IOException { + MetaFileWriterBuilder.DataStep dataStep = + metaFileWriterBuilder.newBuilder().startMetaFileGeneration(snapshotTime); + ImmutableSetMultimap.Builder<String, AbstractBackupPath> builder = + ImmutableSetMultimap.builder(); + for (String file : filesToAdd) { + String basename = Paths.get(file).getFileName().toString(); + int lastIndex = basename.lastIndexOf('-'); + lastIndex = lastIndex < 0 ?
basename.length() : lastIndex; + String prefix = basename.substring(0, lastIndex); + AbstractBackupPath path = pathProvider.get(); + path.parseRemote(file); + path.setCreationTime(path.getLastModified()); + builder.put(prefix, path); + } + dataStep.addColumnfamilyResult(keyspace, columnfamily, builder.build()); + Path metaPath = dataStep.endMetaFileGeneration().getMetaFilePath(); + metaPath.toFile().setLastModified(snapshotTime.toEpochMilli()); + return metaPath; + } + + public String createFile(String fileName, Instant lastModifiedTime) throws Exception { + Path path = Paths.get(dataDir, keyspace, columnfamily, fileName); + FileUtils.forceMkdirParent(path.toFile()); + try (FileWriter fileWriter = new FileWriter(path.toFile())) { + fileWriter.write(""); + } + path.toFile().setLastModified(lastModifiedTime.toEpochMilli()); + return path.toString(); + } +} diff --git a/priam/src/test/java/com/netflix/priam/backupv2/TestBackupV2Service.java b/priam/src/test/java/com/netflix/priam/backupv2/TestBackupV2Service.java new file mode 100644 index 000000000..064c26b8b --- /dev/null +++ b/priam/src/test/java/com/netflix/priam/backupv2/TestBackupV2Service.java @@ -0,0 +1,225 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.priam.backupv2; + +import com.google.inject.Guice; +import com.google.inject.Injector; +import com.netflix.priam.backup.AbstractBackup; +import com.netflix.priam.backup.BRTestModule; +import com.netflix.priam.config.IBackupRestoreConfig; +import com.netflix.priam.config.IConfiguration; +import com.netflix.priam.connection.JMXNodeTool; +import com.netflix.priam.defaultimpl.IService; +import com.netflix.priam.identity.token.ITokenRetriever; +import com.netflix.priam.scheduler.PriamScheduler; +import com.netflix.priam.tuner.CassandraTunerService; +import com.netflix.priam.tuner.TuneCassandra; +import com.netflix.priam.utils.BackupFileUtils; +import com.netflix.priam.utils.DateUtil; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.time.Instant; +import java.util.Set; +import mockit.Expectations; +import mockit.Mocked; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.quartz.SchedulerException; + +/** Created by aagrawal on 3/9/19. 
*/ +public class TestBackupV2Service { + private final PriamScheduler scheduler; + private final SnapshotMetaTask snapshotMetaTask; + private final CassandraTunerService cassandraTunerService; + private final ITokenRetriever tokenRetriever; + + public TestBackupV2Service() { + Injector injector = Guice.createInjector(new BRTestModule()); + scheduler = injector.getInstance(PriamScheduler.class); + snapshotMetaTask = injector.getInstance(SnapshotMetaTask.class); + cassandraTunerService = injector.getInstance(CassandraTunerService.class); + tokenRetriever = injector.getInstance(ITokenRetriever.class); + } + + @Before + public void cleanup() throws SchedulerException { + scheduler.getScheduler().clear(); + } + + @Test + public void testBackupDisabled( + @Mocked IConfiguration configuration, @Mocked IBackupRestoreConfig backupRestoreConfig) + throws Exception { + + new Expectations() { + { + backupRestoreConfig.getSnapshotMetaServiceCronExpression(); + result = "-1"; + configuration.getDataFileLocation(); + result = "target/data"; + } + }; + Path dummyDataDirectoryLocation = Paths.get(configuration.getDataFileLocation()); + Instant snapshotInstant = DateUtil.getInstant(); + String snapshotName = snapshotMetaTask.generateSnapshotName(snapshotInstant); + // Create one V2 snapshot. + BackupFileUtils.generateDummyFiles( + dummyDataDirectoryLocation, + 2, + 3, + 3, + AbstractBackup.SNAPSHOT_FOLDER, + snapshotName, + true); + + // Create one V1 snapshot. + String snapshotV1Name = DateUtil.formatInstant(DateUtil.yyyyMMdd, snapshotInstant); + BackupFileUtils.generateDummyFiles( + dummyDataDirectoryLocation, + 2, + 3, + 3, + AbstractBackup.SNAPSHOT_FOLDER, + snapshotV1Name, + false); + + IService backupService = + new BackupV2Service( + configuration, + backupRestoreConfig, + scheduler, + snapshotMetaTask, + cassandraTunerService, + tokenRetriever); + backupService.scheduleService(); + Assert.assertTrue(scheduler.getScheduler().getJobGroupNames().isEmpty()); + + // snapshot V2 name should not be there. + Set<Path> backupPaths = + AbstractBackup.getBackupDirectories(configuration, AbstractBackup.SNAPSHOT_FOLDER); + for (Path backupPath : backupPaths) { + Assert.assertFalse(Files.exists(Paths.get(backupPath.toString(), snapshotName))); + Assert.assertTrue(Files.exists(Paths.get(backupPath.toString(), snapshotV1Name))); + } + } + + @Test + public void testBackupEnabled( + @Mocked IConfiguration configuration, @Mocked IBackupRestoreConfig backupRestoreConfig) + throws Exception { + new Expectations() { + { + backupRestoreConfig.getSnapshotMetaServiceCronExpression(); + result = "0 0 0/1 1/1 * ?
*"; + backupRestoreConfig.getBackupTTLMonitorPeriodInSec(); + result = 600; + backupRestoreConfig.getBackupVerificationCronExpression(); + result = "0 0 0/1 1/1 * ? *"; + configuration.isIncrementalBackupEnabled(); + result = false; + configuration.getDataFileLocation(); + result = "target/data"; + } + }; + IService backupService = + new BackupV2Service( + configuration, + backupRestoreConfig, + scheduler, + snapshotMetaTask, + cassandraTunerService, + tokenRetriever); + backupService.scheduleService(); + Assert.assertEquals(3, scheduler.getScheduler().getJobKeys(null).size()); + } + + @Test + public void updateService( + @Mocked IConfiguration configuration, + @Mocked IBackupRestoreConfig backupRestoreConfig, + @Mocked JMXNodeTool nodeTool, + @Mocked TuneCassandra tuneCassandra) + throws Exception { + new Expectations() { + { + backupRestoreConfig.getSnapshotMetaServiceCronExpression(); + result = "0 0 0/1 1/1 * ? *"; + result = "0 0 0/1 1/1 * ? *"; + result = "-1"; + result = "-1"; + configuration.isIncrementalBackupEnabled(); + result = true; + backupRestoreConfig.enableV2Backups(); + result = true; + backupRestoreConfig.getBackupVerificationCronExpression(); + result = "-1"; + backupRestoreConfig.getBackupTTLMonitorPeriodInSec(); + result = 600; + configuration.getBackupCronExpression(); + result = "-1"; + } + }; + IService backupService = + new BackupV2Service( + configuration, + backupRestoreConfig, + scheduler, + snapshotMetaTask, + cassandraTunerService, + tokenRetriever); + backupService.scheduleService(); + Assert.assertEquals(3, scheduler.getScheduler().getJobKeys(null).size()); + + backupService.onChangeUpdateService(); + Assert.assertEquals(0, scheduler.getScheduler().getJobKeys(null).size()); + } +} diff --git a/priam/src/test/java/com/netflix/priam/backupv2/TestBackupVerificationTask.java b/priam/src/test/java/com/netflix/priam/backupv2/TestBackupVerificationTask.java new file mode 100644 index 000000000..390307eca --- /dev/null +++ b/priam/src/test/java/com/netflix/priam/backupv2/TestBackupVerificationTask.java @@ -0,0 +1,186 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.priam.backupv2; + +import com.google.common.collect.ImmutableList; +import com.google.common.truth.Truth; +import com.google.inject.Guice; +import com.google.inject.Injector; +import com.netflix.priam.backup.*; +import com.netflix.priam.health.InstanceState; +import com.netflix.priam.merics.Metrics; +import com.netflix.priam.notification.BackupNotificationMgr; +import com.netflix.priam.scheduler.UnsupportedTypeException; +import com.netflix.priam.utils.DateUtil.DateRange; +import com.netflix.spectator.api.Counter; +import com.netflix.spectator.api.Registry; +import java.time.Instant; +import java.util.List; +import java.util.Optional; +import javax.inject.Inject; +import mockit.*; +import org.junit.Before; +import org.junit.Test; +import org.junit.jupiter.api.Assertions; + +/** Created by aagrawal on 2/1/19. 
*/ +public class TestBackupVerificationTask { + @Inject private BackupVerificationTask backupVerificationService; + private Counter badVerifications; + @Mocked private BackupVerification backupVerification; + @Mocked private BackupNotificationMgr backupNotificationMgr; + + @Before + public void setUp() { + new MockBackupVerification(); + new MockBackupNotificationMgr(); + Injector injector = Guice.createInjector(new BRTestModule()); + injector.injectMembers(this); + badVerifications = + injector.getInstance(Registry.class) + .counter(Metrics.METRIC_PREFIX + "backup.verification.failure"); + } + + private static final class MockBackupVerification extends MockUp<BackupVerification> { + private static boolean throwError; + private static ImmutableList<BackupVerificationResult> results; + + public static void setResults(BackupVerificationResult... newResults) { + results = ImmutableList.copyOf(newResults); + } + + public static void shouldThrow(boolean newThrowError) { + throwError = newThrowError; + } + + @Mock + public List<BackupVerificationResult> verifyAllBackups( + BackupVersion backupVersion, DateRange dateRange) + throws UnsupportedTypeException, IllegalArgumentException { + if (throwError) throw new IllegalArgumentException("DummyError"); + return results; + } + + @Mock + public Optional<BackupVerificationResult> verifyBackup( + BackupVersion backupVersion, boolean force, DateRange dateRange) + throws UnsupportedTypeException, IllegalArgumentException { + if (throwError) throw new IllegalArgumentException("DummyError"); + return results.isEmpty() ? Optional.empty() : Optional.of(results.get(0)); + } + } + + private static final class MockBackupNotificationMgr extends MockUp<BackupNotificationMgr> {} + + @Test + public void throwError() { + MockBackupVerification.shouldThrow(true); + Assertions.assertThrows( + IllegalArgumentException.class, () -> backupVerificationService.execute()); + } + + @Test + public void validBackups() throws Exception { + MockBackupVerification.shouldThrow(false); + MockBackupVerification.setResults(getValidBackupVerificationResult()); + backupVerificationService.execute(); + Truth.assertThat(badVerifications.count()).isEqualTo(0); + new Verifications() { + { + backupNotificationMgr.notify((BackupVerificationResult) any); + times = 1; + } + }; + } + + @Test + public void invalidBackups() throws Exception { + MockBackupVerification.shouldThrow(false); + MockBackupVerification.setResults(getInvalidBackupVerificationResult()); + backupVerificationService.execute(); + Truth.assertThat(badVerifications.count()).isEqualTo(0); + new Verifications() { + { + backupNotificationMgr.notify((BackupVerificationResult) any); + times = 1; + } + }; + } + + @Test + public void noBackups() throws Exception { + MockBackupVerification.shouldThrow(false); + MockBackupVerification.setResults(); + backupVerificationService.execute(); + Truth.assertThat(badVerifications.count()).isEqualTo(1); + new Verifications() { + { + backupNotificationMgr.notify((BackupVerificationResult) any); + maxTimes = 0; + } + }; + } + + @Test + public void testRestoreMode(@Mocked InstanceState state) throws Exception { + new Expectations() { + { + state.getRestoreStatus().getStatus(); + result = Status.STARTED; + } + }; + backupVerificationService.execute(); + Truth.assertThat(badVerifications.count()).isEqualTo(0); + new Verifications() { + { + backupVerification.verifyBackup((BackupVersion) any, anyBoolean, (DateRange) any); + maxTimes = 0; + } + + { + backupVerification.verifyAllBackups((BackupVersion) any, (DateRange) any); + maxTimes = 0; + } + + { + backupNotificationMgr.notify((BackupVerificationResult) any);
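+ // In JMockit, maxTimes = 0 on the invocation recorded above asserts it never + // ran: while a restore is STARTED, verification must do no work and send no + // notification.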
maxTimes = 0; + } + }; + } + + private static BackupVerificationResult getInvalidBackupVerificationResult() { + BackupVerificationResult result = new BackupVerificationResult(); + result.valid = false; + result.manifestAvailable = true; + result.remotePath = "some_random"; + result.filesMatched = 123; + result.snapshotInstant = Instant.EPOCH; + return result; + } + + private static BackupVerificationResult getValidBackupVerificationResult() { + BackupVerificationResult result = new BackupVerificationResult(); + result.valid = true; + result.manifestAvailable = true; + result.remotePath = "some_random"; + result.filesMatched = 123; + result.snapshotInstant = Instant.EPOCH; + return result; + } +} diff --git a/priam/src/test/java/com/netflix/priam/backupv2/TestFileUploadResult.java b/priam/src/test/java/com/netflix/priam/backupv2/TestFileUploadResult.java new file mode 100644 index 000000000..49c45adfa --- /dev/null +++ b/priam/src/test/java/com/netflix/priam/backupv2/TestFileUploadResult.java @@ -0,0 +1,73 @@ +package com.netflix.priam.backupv2; + +import com.google.common.truth.Truth; +import java.nio.file.Paths; +import java.time.Instant; +import org.junit.Test; + +/** unit tests of FileUploadResult */ +public class TestFileUploadResult { + + @Test + public void standardInput() { + Truth.assertThat(getResult().toString()) + .isEqualTo( + "{\n" + + " \"fileName\": \"path\",\n" + + " \"lastModifiedTime\": 200000,\n" + + " \"fileCreationTime\": 100000,\n" + + " \"fileSizeOnDisk\": 100000,\n" + + " \"compression\": \"SNAPPY\",\n" + + " \"encryption\": \"PLAINTEXT\"\n" + + "}"); + } + + @Test + public void setIsUploaded() { + FileUploadResult result = getResult(); + result.setUploaded(true); + Truth.assertThat(result.toString()) + .isEqualTo( + "{\n" + + " \"fileName\": \"path\",\n" + + " \"lastModifiedTime\": 200000,\n" + + " \"fileCreationTime\": 100000,\n" + + " \"fileSizeOnDisk\": 100000,\n" + + " \"compression\": \"SNAPPY\",\n" + + " \"encryption\": \"PLAINTEXT\",\n" + + " \"isUploaded\": true\n" + + "}"); + } + + @Test + public void setBackupPath() { + FileUploadResult result = getResult(); + result.setBackupPath("foo"); + Truth.assertThat(result.toString()) + .isEqualTo( + "{\n" + + " \"fileName\": \"path\",\n" + + " \"lastModifiedTime\": 200000,\n" + + " \"fileCreationTime\": 100000,\n" + + " \"fileSizeOnDisk\": 100000,\n" + + " \"compression\": \"SNAPPY\",\n" + + " \"encryption\": \"PLAINTEXT\",\n" + + " \"backupPath\": \"foo\"\n" + + "}"); + } + + @Test + public void getBackupPath() { + FileUploadResult result = getResult(); + result.setBackupPath("foo"); + Truth.assertThat(result.getBackupPath()).isEqualTo("foo"); + } + + private FileUploadResult getResult() { + return new FileUploadResult( + Paths.get("/my/file/path"), + Instant.ofEpochMilli(200000L), + Instant.ofEpochMilli(100000L), + 100000L); + } +} diff --git a/priam/src/test/java/com/netflix/priam/backupv2/TestForgottenFileManager.java b/priam/src/test/java/com/netflix/priam/backupv2/TestForgottenFileManager.java new file mode 100644 index 000000000..869891d99 --- /dev/null +++ b/priam/src/test/java/com/netflix/priam/backupv2/TestForgottenFileManager.java @@ -0,0 +1,235 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.priam.backupv2; + +import com.google.inject.Guice; +import com.google.inject.Injector; +import com.netflix.priam.backup.BRTestModule; +import com.netflix.priam.config.FakeConfiguration; +import com.netflix.priam.merics.BackupMetrics; +import com.netflix.priam.utils.DateUtil; +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.time.Instant; +import java.time.temporal.ChronoUnit; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.stream.Collectors; +import org.apache.commons.io.FileUtils; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +/** Created by aagrawal on 1/1/19. */ +public class TestForgottenFileManager { + private ForgottenFilesManager forgottenFilesManager; + private TestBackupUtils testBackupUtils; + private ForgottenFilesConfiguration configuration; + private List<Path> allFiles = new ArrayList<>(); + private Instant snapshotInstant; + private Path snapshotDir; + + public TestForgottenFileManager() { + Injector injector = Guice.createInjector(new BRTestModule()); + BackupMetrics backupMetrics = injector.getInstance(BackupMetrics.class); + configuration = new ForgottenFilesConfiguration(); + forgottenFilesManager = new ForgottenFilesManager(configuration, backupMetrics); + testBackupUtils = injector.getInstance(TestBackupUtils.class); + } + + @Before + public void prep() throws Exception { + cleanup(); + Instant now = DateUtil.getInstant(); + snapshotInstant = now; + Path file1 = Paths.get(testBackupUtils.createFile("file1", now.minus(10, ChronoUnit.DAYS))); + Path file2 = Paths.get(testBackupUtils.createFile("file2", now.minus(8, ChronoUnit.DAYS))); + Path file3 = Paths.get(testBackupUtils.createFile("file3", now.minus(6, ChronoUnit.DAYS))); + Path file4 = Paths.get(testBackupUtils.createFile("file4", now.minus(4, ChronoUnit.DAYS))); + Path file5 = Paths.get(testBackupUtils.createFile("file5", now.minus(1, ChronoUnit.DAYS))); + Path file6 = + Paths.get( + testBackupUtils.createFile( + "tmplink-lb-59516-big-Index.db", now.minus(3, ChronoUnit.DAYS))); + Path file7 = + Paths.get(testBackupUtils.createFile("file7.tmp", now.minus(3, ChronoUnit.DAYS))); + + allFiles.add(file1); + allFiles.add(file2); + allFiles.add(file3); + allFiles.add(file4); + allFiles.add(file5); + allFiles.add(file6); + allFiles.add(file7); + + // Create a snapshot with file2, file3, file4.
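+ // Cassandra snapshots are hard links into the live data directory, so + // Files.createLink below is a faithful stand-in: the snapshot shares inodes + // with file2/3/4, leaving file1 as the one SSTable that is older than the + // snapshot yet absent from it.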
+ Path columnfamilyDir = file1.getParent(); + snapshotDir = + Paths.get( + columnfamilyDir.toString(), + "snapshot", + "snap_v2_" + DateUtil.formatInstant(DateUtil.yyyyMMddHHmm, now)); + snapshotDir.toFile().mkdirs(); + Files.createLink(Paths.get(snapshotDir.toString(), file2.getFileName().toString()), file2); + Files.createLink(Paths.get(snapshotDir.toString(), file3.getFileName().toString()), file3); + Files.createLink(Paths.get(snapshotDir.toString(), file4.getFileName().toString()), file4); + } + + @After + public void cleanup() throws Exception { + String dataDir = configuration.getDataFileLocation(); + org.apache.commons.io.FileUtils.cleanDirectory(new File(dataDir)); + } + + @Test + public void testMoveForgottenFiles() throws IOException, InterruptedException { + Collection<File> files = allFiles.stream().map(Path::toFile).collect(Collectors.toList()); + Path lostFoundDir = + Paths.get(configuration.getDataFileLocation(), forgottenFilesManager.LOST_FOUND); + + // Let's create some extra symlinks in the LOST_FOUND folder. They should not exist anymore + Path randomSymlink = Paths.get(lostFoundDir.toFile().getAbsolutePath(), "random"); + Files.createDirectory(lostFoundDir); + Files.createSymbolicLink(randomSymlink, lostFoundDir); + + forgottenFilesManager.moveForgottenFiles( + new File(configuration.getDataFileLocation()), files); + + // Extra symlinks are deleted. + Assert.assertFalse(Files.exists(randomSymlink)); + + // Symlinks are created for all the files. They are not moved yet. + Collection<File> symlinkFiles = FileUtils.listFiles(lostFoundDir.toFile(), null, false); + Assert.assertEquals(allFiles.size(), symlinkFiles.size()); + for (Path file : allFiles) { + Path symlink = Paths.get(lostFoundDir.toString(), file.getFileName().toString()); + Assert.assertTrue(symlinkFiles.contains(symlink.toFile())); + Assert.assertTrue(Files.isSymbolicLink(symlink)); + Assert.assertTrue(Files.exists(file)); + } + + // Let's change the configuration and try again!! + configuration.setGracePeriodForgottenFileInDaysForRead(0); + forgottenFilesManager.moveForgottenFiles( + new File(configuration.getDataFileLocation()), files); + Collection<File> movedFiles = FileUtils.listFiles(lostFoundDir.toFile(), null, false); + Assert.assertEquals(allFiles.size(), movedFiles.size()); + movedFiles + .stream() + .forEach( + file -> { + Assert.assertTrue( + Files.isRegularFile(Paths.get(file.getAbsolutePath()))); + }); + allFiles.stream() + .forEach( + file -> { + Assert.assertFalse(file.toFile().exists()); + }); + + configuration.setGracePeriodForgottenFileInDaysForRead( + ForgottenFilesConfiguration.DEFAULT_GRACE_PERIOD); + } + + @Test + public void getColumnfamilyFiles() { + + Path columnfamilyDir = allFiles.get(0).getParent(); + Collection<File> columnfamilyFiles = + forgottenFilesManager.getColumnfamilyFiles( + snapshotInstant, columnfamilyDir.toFile()); + Assert.assertEquals(3, columnfamilyFiles.size()); + Assert.assertTrue(columnfamilyFiles.contains(allFiles.get(0).toFile())); + Assert.assertTrue(columnfamilyFiles.contains(allFiles.get(1).toFile())); + Assert.assertTrue(columnfamilyFiles.contains(allFiles.get(2).toFile())); + } + + @Test + public void findAndMoveForgottenFiles() { + Path lostFoundDir = + Paths.get(allFiles.get(0).getParent().toString(), forgottenFilesManager.LOST_FOUND); + forgottenFilesManager.findAndMoveForgottenFiles(snapshotInstant, snapshotDir.toFile()); + + // Only one potential forgotten file - file1. It will be symlink here.
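+ // file4 through file7 are newer than the grace-period cutoff (and the + // tmplink-/.tmp names look like in-flight compaction artifacts anyway), so + // file1 is the only candidate; within the read grace period it is only + // symlinked into LOST_FOUND, not moved.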
+ Collection<File> movedFiles = FileUtils.listFiles(lostFoundDir.toFile(), null, false); + Assert.assertEquals(1, movedFiles.size()); + Assert.assertTrue( + movedFiles + .iterator() + .next() + .getName() + .equals(allFiles.get(0).getFileName().toString())); + Assert.assertTrue( + Files.isSymbolicLink(Paths.get(movedFiles.iterator().next().getAbsolutePath()))); + + // All files still remain in columnfamily dir. + Collection<File> cfFiles = + FileUtils.listFiles(new File(allFiles.get(0).getParent().toString()), null, false); + Assert.assertEquals(allFiles.size(), cfFiles.size()); + + // Snapshot is untouched. + Collection<File> snapshotFiles = FileUtils.listFiles(snapshotDir.toFile(), null, false); + Assert.assertEquals(3, snapshotFiles.size()); + + // Let's change the configuration and try again!! + configuration.setGracePeriodForgottenFileInDaysForRead(0); + forgottenFilesManager.findAndMoveForgottenFiles(snapshotInstant, snapshotDir.toFile()); + configuration.setGracePeriodForgottenFileInDaysForRead( + ForgottenFilesConfiguration.DEFAULT_GRACE_PERIOD); + movedFiles = FileUtils.listFiles(lostFoundDir.toFile(), null, false); + Assert.assertEquals(1, movedFiles.size()); + Assert.assertTrue( + Files.isRegularFile(Paths.get(movedFiles.iterator().next().getAbsolutePath()))); + cfFiles = + FileUtils.listFiles(new File(allFiles.get(0).getParent().toString()), null, false); + Assert.assertEquals(6, cfFiles.size()); + // file2..file7 should remain; listFiles gives no ordering guarantee, so check + // membership rather than positional equality. + for (int i = 1; i < allFiles.size(); i++) { + Assert.assertTrue(cfFiles.contains(allFiles.get(i).toFile())); + } + + // Snapshot is untouched. + snapshotFiles = FileUtils.listFiles(snapshotDir.toFile(), null, false); + Assert.assertEquals(3, snapshotFiles.size()); + } + + private class ForgottenFilesConfiguration extends FakeConfiguration { + protected static final int DEFAULT_GRACE_PERIOD = 3; + private int gracePeriodForgottenFileInDaysForRead = DEFAULT_GRACE_PERIOD; + + @Override + public boolean isForgottenFileMoveEnabled() { + return true; + } + + @Override + public int getForgottenFileGracePeriodDaysForRead() { + return gracePeriodForgottenFileInDaysForRead; + } + + public void setGracePeriodForgottenFileInDaysForRead( + int gracePeriodForgottenFileInDaysForRead) { + this.gracePeriodForgottenFileInDaysForRead = gracePeriodForgottenFileInDaysForRead; + } + } +} diff --git a/priam/src/test/java/com/netflix/priam/backupv2/TestMetaV2Proxy.java b/priam/src/test/java/com/netflix/priam/backupv2/TestMetaV2Proxy.java new file mode 100644 index 000000000..68d56d895 --- /dev/null +++ b/priam/src/test/java/com/netflix/priam/backupv2/TestMetaV2Proxy.java @@ -0,0 +1,287 @@ +/* + * Copyright 2018 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ *
+ */
+
+package com.netflix.priam.backupv2;
+
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+import com.google.inject.Provider;
+import com.netflix.priam.backup.AbstractBackupPath;
+import com.netflix.priam.backup.BRTestModule;
+import com.netflix.priam.backup.BackupRestoreException;
+import com.netflix.priam.backup.FakeBackupFileSystem;
+import com.netflix.priam.config.IConfiguration;
+import com.netflix.priam.utils.DateUtil;
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.time.Instant;
+import java.time.temporal.ChronoUnit;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.stream.Collectors;
+import org.apache.commons.io.FileUtils;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Test;
+
+/** Created by aagrawal on 12/5/18. */
+public class TestMetaV2Proxy {
+    private FakeBackupFileSystem fs;
+    private IConfiguration configuration;
+    private TestBackupUtils backupUtils;
+    private IMetaProxy metaProxy;
+    private Provider<AbstractBackupPath> abstractBackupPathProvider;
+
+    public TestMetaV2Proxy() {
+        Injector injector = Guice.createInjector(new BRTestModule());
+        configuration = injector.getInstance(IConfiguration.class);
+        fs = injector.getInstance(FakeBackupFileSystem.class);
+        fs.setupTest(getRemoteFakeFiles());
+        backupUtils = new TestBackupUtils();
+        metaProxy = injector.getInstance(MetaV2Proxy.class);
+        abstractBackupPathProvider = injector.getProvider(AbstractBackupPath.class);
+    }
+
+    @Test
+    public void testMetaPrefix() {
+        // Null date range
+        Assert.assertEquals(getPrefix() + "/META_V2", metaProxy.getMetaPrefix(null));
+        Instant now = Instant.now();
+        // No end date.
+        Assert.assertEquals(
+                getPrefix() + "/META_V2/" + now.toEpochMilli(),
+                metaProxy.getMetaPrefix(new DateUtil.DateRange(now, null)));
+        // No start date
+        Assert.assertEquals(
+                getPrefix() + "/META_V2",
+                metaProxy.getMetaPrefix(new DateUtil.DateRange(null, Instant.now())));
+        long start = 1834567890L;
+        long end = 1834877776L;
+        Assert.assertEquals(
+                getPrefix() + "/META_V2/1834",
+                metaProxy.getMetaPrefix(
+                        new DateUtil.DateRange(
+                                Instant.ofEpochSecond(start), Instant.ofEpochSecond(end))));
+    }
+
+    @Test
+    public void testIsMetaFileValid() throws Exception {
+        Instant snapshotInstant = DateUtil.getInstant();
+        Path metaPath = backupUtils.createMeta(getRemoteFakeFiles(), snapshotInstant);
+        AbstractBackupPath abstractBackupPath = abstractBackupPathProvider.get();
+        abstractBackupPath.parseLocal(metaPath.toFile(), AbstractBackupPath.BackupFileType.META_V2);
+
+        Assert.assertTrue(metaProxy.isMetaFileValid(abstractBackupPath).valid);
+        FileUtils.deleteQuietly(metaPath.toFile());
+
+        List<String> fileToAdd = getRemoteFakeFiles();
+        fileToAdd.add(
+                Paths.get(
+                                getPrefix(),
+                                AbstractBackupPath.BackupFileType.SST_V2.toString(),
+                                "1859817645000",
+                                "keyspace1",
+                                "columnfamily1",
+                                "SNAPPY",
+                                "PLAINTEXT",
+                                "file9.Data.db")
+                        .toString());
+
+        metaPath = backupUtils.createMeta(fileToAdd, snapshotInstant);
+        Assert.assertFalse(metaProxy.isMetaFileValid(abstractBackupPath).valid);
+        FileUtils.deleteQuietly(metaPath.toFile());
+
+        metaPath = Paths.get(configuration.getDataFileLocation(), "meta_v2_201801010000.json");
+        Assert.assertFalse(metaProxy.isMetaFileValid(abstractBackupPath).valid);
+    }
+
+    @Test
+    public void testGetSSTFilesFromMeta() throws Exception {
+        Instant snapshotInstant = DateUtil.getInstant();
+        List<String> remoteFiles = getRemoteFakeFiles();
+        Path metaPath =
+                backupUtils.createMeta(remoteFiles, snapshotInstant);
+        List<String> filesFromMeta = metaProxy.getSSTFilesFromMeta(metaPath);
+        filesFromMeta.removeAll(remoteFiles);
+        Assert.assertTrue(filesFromMeta.isEmpty());
+    }
+
+    @Test
+    public void testGetIncrementalFiles() throws Exception {
+        DateUtil.DateRange dateRange = new DateUtil.DateRange("202812071820,20281229");
+        Iterator<AbstractBackupPath> incrementals = metaProxy.getIncrementals(dateRange);
+        int i = 0;
+        while (incrementals.hasNext()) {
+            System.out.println(incrementals.next());
+            i++;
+        }
+        Assert.assertEquals(3, i);
+    }
+
+    @Test
+    public void testFindMetaFiles() throws BackupRestoreException {
+        List<AbstractBackupPath> metas =
+                metaProxy.findMetaFiles(
+                        new DateUtil.DateRange(
+                                Instant.ofEpochMilli(1859824860000L),
+                                Instant.ofEpochMilli(1859828420000L)));
+        Assert.assertEquals(1, metas.size());
+        Assert.assertEquals("meta_v2_202812071801.json", metas.get(0).getFileName());
+        Assert.assertTrue(fs.doesRemoteFileExist(Paths.get(metas.get(0).getRemotePath())));
+
+        metas =
+                metaProxy.findMetaFiles(
+                        new DateUtil.DateRange(
+                                Instant.ofEpochMilli(1859824860000L),
+                                Instant.ofEpochMilli(1859828460000L)));
+        Assert.assertEquals(2, metas.size());
+    }
+
+    @Test
+    public void testFindLatestValidMetaFile() {}
+
+    private String getPrefix() {
+        return "casstestbackup/1049_fake-app/1808575600";
+    }
+
+    private List<String> getRemoteFakeFiles() {
+        List<Path> files = new ArrayList<>();
+        files.add(
+                Paths.get(
+                        getPrefix(),
+                        AbstractBackupPath.BackupFileType.SST_V2.toString(),
+                        "1859817645000",
+                        "keyspace1",
+                        "columnfamily1",
+                        "SNAPPY",
+                        "PLAINTEXT",
+                        "file1-Data.db"));
+        files.add(
+                Paths.get(
+                        getPrefix(),
+                        AbstractBackupPath.BackupFileType.SST_V2.toString(),
+                        "1859818845000",
+                        "keyspace1",
+                        "columnfamily1",
+                        "SNAPPY",
+                        "PLAINTEXT",
+                        "file2-Data.db"));
+
+        files.add(
+                Paths.get(
+                        getPrefix(),
+                        AbstractBackupPath.BackupFileType.META_V2.toString(),
+                        "1859824860000",
+                        "SNAPPY",
+                        "PLAINTEXT",
+                        "meta_v2_202812071801.json"));
+        files.add(
+                Paths.get(
+                        getPrefix(),
+                        AbstractBackupPath.BackupFileType.SST_V2.toString(),
+                        "1859826045000",
+                        "keyspace1",
+                        "columnfamily1",
+                        "SNAPPY",
+                        "PLAINTEXT",
+                        "manifest"));
+        files.add(
+                Paths.get(
+                        getPrefix(),
+                        AbstractBackupPath.BackupFileType.SST_V2.toString(),
+                        "1859828410000",
+                        "keyspace1",
+                        "columnfamily1",
+                        "SNAPPY",
+                        "PLAINTEXT",
+                        "file3-Data.db"));
+        files.add(
+                Paths.get(
+                        getPrefix(),
+                        AbstractBackupPath.BackupFileType.SST_V2.toString(),
+                        "1859828420000",
+                        "keyspace1",
+                        "columnfamily1",
+                        "SNAPPY",
+                        "PLAINTEXT",
+                        "file4-Data.db"));
+        files.add(
+                Paths.get(
+                        getPrefix(),
+                        AbstractBackupPath.BackupFileType.META_V2.toString(),
+                        "1859828460000",
+                        "SNAPPY",
+                        "PLAINTEXT",
+                        "meta_v2_202812071901.json"));
+        return files.stream().map(Path::toString).collect(Collectors.toList());
+    }
+
+    @After
+    public void cleanup() throws IOException {
+        FileUtils.cleanDirectory(new File(configuration.getDataFileLocation()));
+    }
+
+    @Test
+    public void testCleanupOldMetaFiles() throws IOException {
+        generateDummyMetaFiles();
+        Path dataDir = Paths.get(configuration.getDataFileLocation());
+        Assert.assertEquals(4, dataDir.toFile().listFiles().length);
+
+        // clean the directory
+        metaProxy.cleanupOldMetaFiles();
+
+        Assert.assertEquals(1, dataDir.toFile().listFiles().length);
+        Path dummy = Paths.get(dataDir.toString(), "dummy.tmp");
+        Assert.assertTrue(dummy.toFile().exists());
+    }
+
+    private void generateDummyMetaFiles() throws IOException {
+        Path dataDir = Paths.get(configuration.getDataFileLocation());
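+        // Lay down two real meta files, one half-written meta .tmp, and one unrelated
+        // dummy.tmp; cleanupOldMetaFiles should remove everything except the unrelated file
+        // (see testCleanupOldMetaFiles above).
+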
FileUtils.cleanDirectory(dataDir.toFile()); + FileUtils.write( + Paths.get( + configuration.getDataFileLocation(), + MetaFileInfo.getMetaFileName(DateUtil.getInstant())) + .toFile(), + "dummy", + "UTF-8"); + + FileUtils.write( + Paths.get( + configuration.getDataFileLocation(), + MetaFileInfo.getMetaFileName( + DateUtil.getInstant().minus(10, ChronoUnit.MINUTES))) + .toFile(), + "dummy", + "UTF-8"); + + FileUtils.write( + Paths.get( + configuration.getDataFileLocation(), + MetaFileInfo.getMetaFileName(DateUtil.getInstant()) + ".tmp") + .toFile(), + "dummy", + "UTF-8"); + + FileUtils.write( + Paths.get(configuration.getDataFileLocation(), "dummy.tmp").toFile(), + "dummy", + "UTF-8"); + } +} diff --git a/priam/src/test/java/com/netflix/priam/backupv2/TestSnapshotMetaTask.java b/priam/src/test/java/com/netflix/priam/backupv2/TestSnapshotMetaTask.java new file mode 100644 index 000000000..781282c79 --- /dev/null +++ b/priam/src/test/java/com/netflix/priam/backupv2/TestSnapshotMetaTask.java @@ -0,0 +1,139 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +package com.netflix.priam.backupv2; + +import com.google.inject.Guice; +import com.google.inject.Injector; +import com.netflix.priam.backup.AbstractBackup; +import com.netflix.priam.backup.BRTestModule; +import com.netflix.priam.config.IBackupRestoreConfig; +import com.netflix.priam.config.IConfiguration; +import com.netflix.priam.identity.config.InstanceInfo; +import com.netflix.priam.scheduler.TaskTimer; +import com.netflix.priam.utils.BackupFileUtils; +import com.netflix.priam.utils.DateUtil; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.time.Instant; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** Created by aagrawal on 6/20/18. 
*/ +public class TestSnapshotMetaTask { + private static final Logger logger = + LoggerFactory.getLogger(TestSnapshotMetaTask.class.getName()); + private static Path dummyDataDirectoryLocation; + private final IConfiguration configuration; + private final IBackupRestoreConfig backupRestoreConfig; + private final SnapshotMetaTask snapshotMetaService; + private final TestMetaFileReader metaFileReader; + private final InstanceInfo instanceInfo; + + public TestSnapshotMetaTask() { + Injector injector = Guice.createInjector(new BRTestModule()); + + configuration = injector.getInstance(IConfiguration.class); + backupRestoreConfig = injector.getInstance(IBackupRestoreConfig.class); + snapshotMetaService = injector.getInstance(SnapshotMetaTask.class); + metaFileReader = new TestMetaFileReader(); + instanceInfo = injector.getInstance(InstanceInfo.class); + } + + @Before + public void setUp() { + dummyDataDirectoryLocation = Paths.get(configuration.getDataFileLocation()); + BackupFileUtils.cleanupDir(dummyDataDirectoryLocation); + } + + @Test + public void testSnapshotMetaServiceEnabled() throws Exception { + TaskTimer taskTimer = SnapshotMetaTask.getTimer(backupRestoreConfig); + Assert.assertNotNull(taskTimer); + } + + @Test + public void testMetaFileName() throws Exception { + String fileName = MetaFileInfo.getMetaFileName(DateUtil.getInstant()); + Path path = Paths.get(dummyDataDirectoryLocation.toFile().getAbsolutePath(), fileName); + Assert.assertTrue(metaFileReader.isValidMetaFile(path)); + path = Paths.get(dummyDataDirectoryLocation.toFile().getAbsolutePath(), fileName + ".tmp"); + Assert.assertFalse(metaFileReader.isValidMetaFile(path)); + } + + private void test(int noOfSstables, int noOfKeyspaces, int noOfCf) throws Exception { + Instant snapshotInstant = DateUtil.getInstant(); + String snapshotName = snapshotMetaService.generateSnapshotName(snapshotInstant); + BackupFileUtils.generateDummyFiles( + dummyDataDirectoryLocation, + noOfKeyspaces, + noOfCf, + noOfSstables, + AbstractBackup.SNAPSHOT_FOLDER, + snapshotName, + true); + snapshotMetaService.setSnapshotName(snapshotName); + Path metaFileLocation = + snapshotMetaService.processSnapshot(snapshotInstant).getMetaFilePath(); + Assert.assertNotNull(metaFileLocation); + Assert.assertTrue(metaFileLocation.toFile().exists()); + Assert.assertTrue(metaFileLocation.toFile().isFile()); + Assert.assertEquals( + snapshotInstant.getEpochSecond(), + (metaFileLocation.toFile().lastModified() / 1000)); + + // Try reading meta file. 
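+        // The reader is primed to expect one component more per column family than the
+        // sstables generated above (presumably the snapshot manifest written alongside the
+        // dummy files), hence the +1 below.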
+        metaFileReader.setNoOfSstables(noOfSstables + 1);
+        metaFileReader.readMeta(metaFileLocation);
+
+        MetaFileInfo metaFileInfo = metaFileReader.getMetaFileInfo();
+        Assert.assertEquals(1, metaFileInfo.getVersion());
+        Assert.assertEquals(configuration.getAppName(), metaFileInfo.getAppName());
+        Assert.assertEquals(instanceInfo.getRac(), metaFileInfo.getRack());
+        Assert.assertEquals(instanceInfo.getRegion(), metaFileInfo.getRegion());
+
+        // Cleanup
+        metaFileLocation.toFile().delete();
+        BackupFileUtils.cleanupDir(dummyDataDirectoryLocation);
+    }
+
+    @Test
+    public void testMetaFile() throws Exception {
+        test(5, 1, 1);
+    }
+
+    @Test
+    public void testSize() throws Exception {
+        test(1000, 2, 2);
+    }
+
+    static class TestMetaFileReader extends MetaFileReader {
+
+        private int noOfSstables;
+
+        void setNoOfSstables(int noOfSstables) {
+            this.noOfSstables = noOfSstables;
+        }
+
+        @Override
+        public void process(ColumnFamilyResult columnfamilyResult) {
+            Assert.assertEquals(noOfSstables, columnfamilyResult.getSstables().size());
+        }
+    }
+}
diff --git a/priam/src/test/java/com/netflix/priam/cassandra/token/TestDoublingLogic.java b/priam/src/test/java/com/netflix/priam/cassandra/token/TestDoublingLogic.java
index ddd7970ea..295e127e3 100644
--- a/priam/src/test/java/com/netflix/priam/cassandra/token/TestDoublingLogic.java
+++ b/priam/src/test/java/com/netflix/priam/cassandra/token/TestDoublingLogic.java
@@ -23,101 +23,82 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-
 import org.apache.commons.lang3.StringUtils;
 import org.junit.Test;
 
-/**
- * Seems like skip 3 is the magic number.... this test will make sure to test the same.
- *
- */
-public class TestDoublingLogic
-{
+/** Skip 3 seems to be the magic number; this test makes sure of exactly that. */
+public class TestDoublingLogic {
     private static final int RACS = 2;
     private static final int NODES_PER_RACS = 2;
 
     @Test
-    public void testSkip()
-    {
-        List<String> nodes = new ArrayList<String>();
+    public void testSkip() {
+        List<String> nodes = new ArrayList<>();
         for (int i = 0; i < NODES_PER_RACS; i++)
-            for (int j = 0; j < RACS; j++)
-                nodes.add("RAC-" + j);
-        //printNodes(nodes);
-
+            for (int j = 0; j < RACS; j++) nodes.add("RAC-" + j);
+        // printNodes(nodes);
+
         List<String> newNodes = nodes;
-        for (int i = 0; i < 15; i++)
-        {
+        for (int i = 0; i < 15; i++) {
             int count = newNodes.size();
             newNodes = doubleNodes(newNodes);
-            assertEquals(newNodes.size(), count *2);
+            assertEquals(newNodes.size(), count * 2);
             // printNodes(newNodes);
             validate(newNodes, nodes);
         }
     }
 
-    public void printNodes(List<String> nodes)
-    {
+    public void printNodes(List<String> nodes) {
         System.out.println("=== Printing - Array of Size :" + nodes.size());
         System.out.println(StringUtils.join(nodes, "\n"));
-        System.out.println("=====================Completed doubling===============================" + nodes.size());
+        System.out.println(
+                "====================Completed doubling==============================="
+                        + nodes.size());
     }
 
-    private void validate(List<String> newNodes, List<String> nodes)
-    {
+    private void validate(List<String> newNodes, List<String> nodes) {
         String temp = "";
         int count = 0;
-        for (String node : newNodes)
-        {
-            if (temp.equals(node))
-                count++;
-            else
-                count = 0;
-
-            if (count == 2)
-            {
+        for (String node : newNodes) {
+            if (temp.equals(node)) count++;
+            else count = 0;
+
+            if (count == 2) {
                 System.out.println("Found an issue.....");
                 throw new RuntimeException();
             }
-            temp = node;
+            temp = node;
         }
-
+
         // compare if they are the same set...
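+        // If the doubled ring has cycled back to the original ordering, further doubling would
+        // only repeat the same sequence, so the test bails out early (via the exception below).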
         boolean test = true;
-        for (int i = 0; i < nodes.size(); i++)
-        {
-            if (!newNodes.get(i).equals(nodes.get(i)))
-                test = false;
+        for (int i = 0; i < nodes.size(); i++) {
+            if (!newNodes.get(i).equals(nodes.get(i))) test = false;
         }
 
         if (test)
-            throw new RuntimeException("Awesome we are back to the natural order... No need to test more");
+            throw new RuntimeException(
+                    "Awesome we are back to the natural order... No need to test more");
     }
 
-    private List<String> doubleNodes(List<String> nodes)
-    {
-        List<String> lst = new ArrayList<String>();
-        Map<Integer, String> return_ = new HashMap<Integer, String>();
-        for (int i = 0; i < nodes.size(); i++)
-        {
+    private List<String> doubleNodes(List<String> nodes) {
+        List<String> lst = new ArrayList<>();
+        Map<Integer, String> return_ = new HashMap<>();
+        for (int i = 0; i < nodes.size(); i++) {
             return_.put(i * 2, nodes.get(i));
         }
 
-        for (int i = 0; i < nodes.size() * 2; i++)
-        {
-            if (0 == i % 2)
-            {
-
-                //rotate
-                if (i + 3 >= (nodes.size() * 2))
-                {
-                    int delta = (i+3) - (nodes.size() *2);
-                    return_.put(delta, return_.get(i));
+        for (int i = 0; i < nodes.size() * 2; i++) {
+            if (0 == i % 2) {
+
+                // rotate: copy the node sitting at even slot i three positions ahead,
+                // wrapping around the end of the doubled ring.
+                if (i + 3 >= (nodes.size() * 2)) {
+                    int delta = (i + 3) - (nodes.size() * 2);
+                    return_.put(delta, return_.get(i));
                 }
                 return_.put(i + 3, return_.get(i));
             }
         }
 
-        for (int i = 0; i < nodes.size() * 2; i++)
-            lst.add(return_.get(i));
+        for (int i = 0; i < nodes.size() * 2; i++) lst.add(return_.get(i));
         return lst;
     }
diff --git a/priam/src/test/java/com/netflix/priam/config/FakeBackupRestoreConfig.java b/priam/src/test/java/com/netflix/priam/config/FakeBackupRestoreConfig.java
index 8a8410926..e78987409 100644
--- a/priam/src/test/java/com/netflix/priam/config/FakeBackupRestoreConfig.java
+++ b/priam/src/test/java/com/netflix/priam/config/FakeBackupRestoreConfig.java
@@ -1,26 +1,44 @@
 /**
  * Copyright 2018 Netflix, Inc.
- * <p>
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
+ *
+ * <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * <p>http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * <p>
Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.priam.config; -/** - * Created by aagrawal on 6/26/18. - */ -public class FakeBackupRestoreConfig implements IBackupRestoreConfig{ +import com.google.common.collect.ImmutableSet; + +/** Created by aagrawal on 6/26/18. */ +public class FakeBackupRestoreConfig implements IBackupRestoreConfig { @Override public String getSnapshotMetaServiceCronExpression() { - return "0 0/2 * 1/1 * ? *"; //Every 2 minutes for testing purposes + return "0 0/2 * 1/1 * ? *"; // Every 2 minutes for testing purposes + } + + @Override + public boolean enableV2Backups() { + return false; + } + + @Override + public boolean enableV2Restore() { + return false; + } + + @Override + public int getBackupTTLMonitorPeriodInSec() { + return 0; // avoids sleeping altogether in tests. + } + + @Override + public ImmutableSet getBackupNotificationAdditionalMessageAttrs() { + return ImmutableSet.of(); } } diff --git a/priam/src/test/java/com/netflix/priam/config/FakeConfiguration.java b/priam/src/test/java/com/netflix/priam/config/FakeConfiguration.java index c17fc2f03..6f855b086 100644 --- a/priam/src/test/java/com/netflix/priam/config/FakeConfiguration.java +++ b/priam/src/test/java/com/netflix/priam/config/FakeConfiguration.java @@ -17,879 +17,284 @@ package com.netflix.priam.config; -import com.google.common.collect.Lists; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; import com.google.inject.Singleton; -import com.netflix.priam.config.IConfiguration; -import com.netflix.priam.tuner.JVMOption; -import com.netflix.priam.config.PriamConfiguration; -import com.netflix.priam.tuner.GCType; -import com.netflix.priam.identity.config.InstanceDataRetriever; -import com.netflix.priam.identity.config.LocalInstanceDataRetriever; -import com.netflix.priam.scheduler.SchedulerType; -import com.netflix.priam.scheduler.UnsupportedTypeException; - import java.io.File; -import java.util.Arrays; +import java.util.HashMap; import java.util.List; import java.util.Map; @Singleton -public class FakeConfiguration implements IConfiguration -{ +public class FakeConfiguration implements IConfiguration { - public static final String FAKE_REGION = "us-east-1"; + private final String appName; + private String restorePrefix = ""; + public Map fakeConfig; + private String roleManager = ""; + private boolean mayCreateNewToken; + private ImmutableList racs; + private boolean usePrivateIp; + private String diskAccessMode; + private boolean skipDeletingOthersIngressRules; + private boolean skipUpdatingOthersIngressRules; + private boolean skipIngressUnlessIPIsPublic; + private long compressionTransitionEpochMillis; - public String region; - public String appName; - public String zone; - public String instance_id; - public String restorePrefix; + public Map fakeProperties = new HashMap<>(); - public FakeConfiguration() - { - this(FAKE_REGION, "my_fake_cluster", "my_zone", "i-01234567"); + public FakeConfiguration() { + this("my_fake_cluster"); } - public FakeConfiguration(String region, String appName, String zone, String ins_id) - { - this.region = region; + public FakeConfiguration(String appName) { this.appName = appName; - this.zone = zone; - this.instance_id = ins_id; - 
this.restorePrefix = ""; - } - - @Override - public void intialize() - { - // TODO Auto-generated method stub - - } - - @Override - public String getBackupLocation() - { - // TODO Auto-generated method stub - return "casstestbackup"; - } - - @Override - public String getBackupPrefix() - { - // TODO Auto-generated method stub - return "TEST-netflix.platform.S3"; - } - - @Override - public String getCommitLogLocation() - { - // TODO Auto-generated method stub - return "cass/commitlog"; - } - - @Override - public String getDataFileLocation() - { - // TODO Auto-generated method stub - return "target/data"; + fakeConfig = new HashMap<>(); + fakeConfig.put("auto_bootstrap", false); + this.mayCreateNewToken = true; // matches interface default + this.racs = ImmutableList.of("az1", "az2", "az3"); } - @Override - public String getLogDirLocation() { - return null; + public Object getFakeConfig(String key) { + return fakeConfig.get(key); } - @Override - public String getHintsLocation() { - return "target/hints"; + public void setFakeConfig(String key, Object value) { + fakeConfig.put(key, value); } @Override - public String getCacheLocation() - { - // TODO Auto-generated method stub - return "cass/caches"; + public boolean getAutoBoostrap() { + return (Boolean) fakeConfig.getOrDefault("auto_bootstrap", false); } @Override - public List getRacs() - { - return Arrays.asList("az1", "az2", "az3"); + public String getCassHome() { + return "/tmp/priam"; } @Override - public int getJmxPort() - { - return 7199; - } + public void initialize() {} @Override - public String getJmxUsername() - { - return null; + public String getBackupLocation() { + return "casstestbackup"; } @Override - public String getJmxPassword() - { - return null; + public String getBackupPrefix() { + return "TEST-netflix.platform.S3"; } - /** - * @return Enables Remote JMX connections n C* - */ @Override - public boolean enableRemoteJMX() { - return false; + public String getCassandraBaseDirectory() { + return "target"; } @Override - public int getThriftPort() - { - return 9160; + public List getRacs() { + return racs; } - @Override - public int getNativeTransportPort() - { - return 9042; + public void setRacs(String... 
racs) { + this.racs = ImmutableList.copyOf(racs); } @Override - public String getSnitch() - { + public String getSnitch() { return "org.apache.cassandra.locator.SimpleSnitch"; } @Override - public String getRac() - { - return this.zone; - } - - @Override - public String getHostname() - { - // TODO Auto-generated method stub - return instance_id; - } - - @Override - public String getInstanceName() - { - return instance_id; - } - - @Override - public String getHeapSize() - { - // TODO Auto-generated method stub - return null; - } - - @Override - public String getHeapNewSize() - { - // TODO Auto-generated method stub - return null; - } - - @Override - public int getBackupHour() - { - // TODO Auto-generated method stub - return 12; - } - - @Override - public String getBackupCronExpression() { - return null; - } - - @Override - public SchedulerType getBackupSchedulerType() { - return SchedulerType.HOUR; - } - - @Override - public String getRestoreSnapshot() - { - // TODO Auto-generated method stub - return null; - } - - @Override - public String getAppName() - { + public String getAppName() { return appName; } @Override - public String getACLGroupName() - { - return this.getAppName(); + public String getRestorePrefix() { + return this.restorePrefix; } - @Override - public int getMaxBackupUploadThreads() - { - // TODO Auto-generated method stub - return 2; + // For testing purposes only. + public void setRestorePrefix(String restorePrefix) { + this.restorePrefix = restorePrefix; } @Override - public String getSDBInstanceIdentityRegion() - { - // TODO Auto-generated method stub - return null; - } - - @Override - public String getDC() - { - // TODO Auto-generated method stub - return this.region; - } - - @Override - public int getMaxBackupDownloadThreads() - { - // TODO Auto-generated method stub - return 3; - } - - public void setRestorePrefix(String prefix) - { - // TODO Auto-generated method stub - restorePrefix = prefix; - } - - @Override - public String getRestorePrefix() - { - // TODO Auto-generated method stub - return restorePrefix; - } - - @Override - public String getBackupCommitLogLocation() - { + public String getBackupCommitLogLocation() { return "cass/backup/cl/"; } @Override - public boolean isMultiDC() - { - // TODO Auto-generated method stub - return false; - } - - @Override - public String getASGName() - { - // TODO Auto-generated method stub - return null; - } - - /** - * Amazon specific setting to query Additional/ Sibling ASG Memberships in csv format to consider while calculating RAC membership - */ - @Override - public String getSiblingASGNames() { - return null; - } - - @Override - public boolean isIncrBackup() - { - return true; - } - - @Override - public String getHostIP() - { - // TODO Auto-generated method stub - return null; - } - - - @Override - public int getUploadThrottle() - { - // TODO Auto-generated method stub - return 0; - } - - @Override - public InstanceDataRetriever getInstanceDataRetriever() throws InstantiationException, IllegalAccessException, ClassNotFoundException { - return new LocalInstanceDataRetriever(); - } - - @Override - public boolean isLocalBootstrapEnabled() { - // TODO Auto-generated method stub - return false; - } - - @Override - public int getInMemoryCompactionLimit() { - return 8; - } - - @Override - public int getCompactionThroughput() { - // TODO Auto-generated method stub - return 0; - } - - @Override - public String getMaxDirectMemory() - { - // TODO Auto-generated method stub - return null; - } - - @Override - public String 
getBootClusterName() - { - return "cass_bootstrap"; - } - - @Override - public String getCassHome() - { - return "/tmp/priam"; - } - - @Override - public String getCassStartupScript() - { + public String getCassStartupScript() { return "/usr/bin/false"; } - @Override - public List getRestoreKeySpaces() - { - return Lists.newArrayList(); - } - - @Override - public long getBackupChunkSize() - { - return 5L*1024*1024; - } - - @Override - public void setDC(String region) - { - // TODO Auto-generated method stub - - } - - @Override - public boolean isRestoreClosestToken() - { - // TODO Auto-generated method stub - return false; - } - - @Override - public String getCassStopScript() - { - return "true"; - } - - @Override - public int getGracefulDrainHealthWaitSeconds() { - return -1; - } - @Override public int getRemediateDeadCassandraRate() { return 1; } @Override - public int getStoragePort() - { - return 7101; - } - - @Override - public String getSeedProviderName() - { + public String getSeedProviderName() { return "org.apache.cassandra.locator.SimpleSeedProvider"; } @Override - public int getBackupRetentionDays() - { + public int getBackupRetentionDays() { return 5; } @Override - public List getBackupRacs() - { - return Lists.newArrayList(); - } - - public int getMaxHintWindowInMS() - { - return 36000; - } - - public int getHintedHandoffThrottleKb() - { - return 1024; - } - - public int getMaxHintThreads() - { - return 1; - } - - - - /** - * @return memtable_cleanup_threshold in C* yaml - */ - @Override - public double getMemtableCleanupThreshold() { - return 0.11; - } - - @Override - public int getStreamingThroughputMB() - { - return 400; - } - - - public String getPartitioner() { - return "org.apache.cassandra.dht.RandomPartitioner"; - } - - @Override - public int getSSLStoragePort() - { - // TODO Auto-generated method stub - return 7103; - } - - public String getKeyCacheSizeInMB() - { - return "16"; - } - - public String getKeyCacheKeysToSave() - { - return "32"; - } - - public String getRowCacheSizeInMB() - { - return "4"; - } - - public String getRowCacheKeysToSave() - { - return "4"; - } - - @Override - public String getCassProcessName() { - return "CassandraDaemon"; - } - - public int getNumTokens() - { - return 1; - } - - public String getYamlLocation() - { - return "conf/cassandra.yaml"; + public String getYamlLocation() { + return getCassHome() + "/conf/cassandra.yaml"; } @Override public String getJVMOptionsFileLocation() { - return "src/test/resources/conf/jvm.options"; - } - - @Override - public GCType getGCType() throws UnsupportedTypeException { - return GCType.CMS; - } - - @Override - public Map getJVMExcludeSet() { - return null; - } - - @Override - public Map getJVMUpsertSet() { - return null; + return "src/test/resources/conf/jvm-server.options"; } - public String getAuthenticator() - { - return PriamConfiguration.DEFAULT_AUTHENTICATOR; - } - - public String getAuthorizer() { - return PriamConfiguration.DEFAULT_AUTHORIZER; - } - public boolean doesCassandraStartManually() { - return false; + public String getCommitLogBackupPropsFile() { + return getCassHome() + "/conf/commitlog_archiving.properties"; } - @Override - public boolean isVpcRing() { - return false; - } - - public String getInternodeCompression() - { - return "all"; - } - - @Override - public boolean isBackingUpCommitLogs() - { - return false; - } - - @Override - public String getCommitLogBackupPropsFile() - { - return getCassHome() + PriamConfiguration.DEFAULT_COMMITLOG_PROPS_FILE; - } - - @Override - public 
String getCommitLogBackupArchiveCmd() - { - return null; + public String getCassYamlVal(String priamKey) { + return ""; } @Override - public String getCommitLogBackupRestoreCmd() - { - return null; + public boolean isPostRestoreHookEnabled() { + return true; } @Override - public String getCommitLogBackupRestoreFromDirs() - { - return null; + public String getPostRestoreHook() { + return "echo"; } @Override - public String getCommitLogBackupRestorePointInTime() - { - return null; - } - - public void setRestoreKeySpaces(List keyspaces) { - + public String getPostRestoreHookHeartbeatFileName() { + return System.getProperty("java.io.tmpdir") + File.separator + "postrestorehook.heartbeat"; } @Override - public int maxCommitLogsRestore() { - return 0; - } - - public boolean isClientSslEnabled() - { - return true; - } - - public String getInternodeEncryption() - { - return "all"; - } - - public boolean isDynamicSnitchEnabled() - { - return true; - } - - public boolean isThriftEnabled() - { - return true; - } - - public boolean isNativeTransportEnabled() - { - return false; - } - - public int getConcurrentReadsCnt() - { - return 8; - } - - public int getConcurrentWritesCnt() - { - return 8; - } - - public int getConcurrentCompactorsCnt() - { - return 1; + public String getPostRestoreHookDoneFileName() { + return System.getProperty("java.io.tmpdir") + File.separator + "postrestorehook.done"; } - @Override - public String getRpcServerType() { - return "hsha"; - } - @Override - public int getRpcMinThreads() { - return 16; + public String getProperty(String key, String defaultValue) { + return fakeProperties.getOrDefault(key, defaultValue); } @Override - public int getRpcMaxThreads() { - return 2048; + public String getMergedConfigurationDirectory() { + return fakeProperties.getOrDefault("priam_test_config", "/tmp/priam_test_config"); } - @Override - public int getIndexInterval() { - return 0; - } - @Override - public int getCompactionLargePartitionWarnThresholdInMB() { - return 100; - } - - public String getExtraConfigParams() { - return null; - } - - public String getCassYamlVal(String priamKey) { - return ""; + public ImmutableSet getTunablePropertyFiles() { + String path = new File(getYamlLocation()).getParentFile().getPath(); + return ImmutableSet.of(path + "/cassandra-rackdc.properties"); } @Override - public boolean getAutoBoostrap() { - // TODO Auto-generated method stub - return false; + public String getRoleManager() { + return this.roleManager; } - @Override - public String getDseClusterType() { - // TODO Auto-generated method stub - return "cassandra"; + public FakeConfiguration setRoleManager(String roleManager) { + this.roleManager = roleManager; + return this; } - @Override - public boolean isCreateNewTokenEnable() { - return true; //allow Junit test to create new tokens - } - - @Override - public String getPrivateKeyLocation() { - return null; - } - - @Override - public String getRestoreSourceType() { - return null; - } - - @Override - public boolean isEncryptBackupEnabled() { - return false; - } - - @Override - public boolean isRestoreEncrypted() { - return false; + public String getRAC() { + return "my_zone"; } - @Override - public String getAWSRoleAssumptionArn() { - return null; - } - - @Override - public String getClassicEC2RoleAssumptionArn() { - return null; - } - - @Override - public String getVpcEC2RoleAssumptionArn() { - return null; - } - - @Override - public boolean isDualAccount(){ - return false; + public String getDC() { + return "us-east-1"; } - @Override - public 
String getGcsServiceAccountId() { - return null; - } - - @Override - public String getGcsServiceAccountPrivateKeyLoc() { - return null; - } - - @Override - public String getPgpPasswordPhrase() { - return null; - } - - @Override - public String getPgpPublicKeyLoc() { - return null; - } - - /** - * Use this method for adding extra/ dynamic cassandra startup options or env properties - * - * @return - */ @Override - public Map getExtraEnvParams() { - return null; + public boolean isCreateNewTokenEnable() { + return mayCreateNewToken; } - @Override - public String getRestoreKeyspaceFilter() - { - return null; - } - - @Override - public String getRestoreCFFilter() { - return null; + public void setCreateNewToken(boolean mayCreateNewToken) { + this.mayCreateNewToken = mayCreateNewToken; } - - @Override - public String getIncrementalKeyspaceFilters() { - return null; - } - - @Override - public String getIncrementalCFFilter() { - return null; - } - - @Override - public String getSnapshotKeyspaceFilters() { - return null; - } - - @Override - public String getSnapshotCFFilter() { - return null; - } - - @Override - public String getVpcId() { - return ""; - } - - @Override - public Boolean isIncrBackupParallelEnabled() { - return false; - } - - @Override - public int getIncrementalBkupMaxConsumers() { - return 2; - } - - @Override - public int getUncrementalBkupQueueSize() { - return 100; - } - /** - * @return tombstone_warn_threshold in yaml - */ @Override - public int getTombstoneWarnThreshold() { - return 1000; + public boolean usePrivateIP() { + return usePrivateIp; } - /** - * @return tombstone_failure_threshold in yaml - */ - @Override - public int getTombstoneFailureThreshold() { - return 100000; + public void usePrivateIP(boolean usePrivateIp) { + this.usePrivateIp = usePrivateIp; } - /** - * @return streaming_socket_timeout_in_ms in yaml - */ - @Override - public int getStreamingSocketTimeoutInMS() { - return 86400000; + public BackupsToCompress getBackupsToCompress() { + return (BackupsToCompress) + fakeConfig.getOrDefault("Priam.backupsToCompress", BackupsToCompress.ALL); } @Override - public String getFlushKeyspaces() { - return ""; + public boolean skipDeletingOthersIngressRules() { + return this.skipDeletingOthersIngressRules; } - @Override - public String getFlushInterval() { - return null; + public void setSkipDeletingOthersIngressRules(boolean skipDeletingOthersIngressRules) { + this.skipDeletingOthersIngressRules = skipDeletingOthersIngressRules; } @Override - public String getBackupStatusFileLoc() { - return "backupstatus.ser"; + public boolean skipUpdatingOthersIngressRules() { + return this.skipUpdatingOthersIngressRules; } - @Override - public boolean useSudo() { - return true; + public void setSkipUpdatingOthersIngressRules(boolean skipUpdatingOthersIngressRules) { + this.skipUpdatingOthersIngressRules = skipUpdatingOthersIngressRules; } @Override - public String getBackupNotificationTopicArn() { - return null; + public String getDiskAccessMode() { + return this.diskAccessMode; } - @Override - public SchedulerType getFlushSchedulerType() throws UnsupportedTypeException { - return SchedulerType.HOUR; + public FakeConfiguration setDiskAccessMode(String diskAccessMode) { + this.diskAccessMode = diskAccessMode; + return this; } @Override - public String getFlushCronExpression() { - return null; + public boolean skipIngressUnlessIPIsPublic() { + return this.skipIngressUnlessIPIsPublic; } - @Override - public boolean isPostRestoreHookEnabled() { - return true; + public void 
setSkipIngressUnlessIPIsPublic(boolean skipIngressUnlessIPIsPublic) { + this.skipIngressUnlessIPIsPublic = skipIngressUnlessIPIsPublic; } @Override - public String getPostRestoreHook() { - return "iostat -d 2 10"; + public int getBackupThreads() { + return (Integer) + fakeConfig.getOrDefault( + "Priam.backup.threads", IConfiguration.super.getBackupThreads()); } - @Override - public String getPostRestoreHookHeartbeatFileName() { - return System.getProperty("java.io.tmpdir") + File.separator + "postrestorehook.heartbeat"; + public void setCompressionTransitionEpochMillis(long transitionTime) { + compressionTransitionEpochMillis = transitionTime; } @Override - public String getPostRestoreHookDoneFileName() { - return System.getProperty("java.io.tmpdir") + File.separator + "postrestorehook.done"; + public long getCompressionTransitionEpochMillis() { + return compressionTransitionEpochMillis; } @Override - public int getPostRestoreHookTimeOutInDays() { - return 2; + public int getStreamingKeepAlivePeriod() { + return 300; } } diff --git a/priam/src/test/java/com/netflix/priam/config/PriamConfigurationPersisterTest.java b/priam/src/test/java/com/netflix/priam/config/PriamConfigurationPersisterTest.java new file mode 100644 index 000000000..c675d6e72 --- /dev/null +++ b/priam/src/test/java/com/netflix/priam/config/PriamConfigurationPersisterTest.java @@ -0,0 +1,78 @@ +/* + * Copyright 2017 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ +package com.netflix.priam.config; + +import static junit.framework.TestCase.assertTrue; +import static org.junit.Assert.assertEquals; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.inject.Guice; +import com.google.inject.Injector; +import com.netflix.priam.TestModule; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.HashMap; +import java.util.Map; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +public class PriamConfigurationPersisterTest { + private static PriamConfigurationPersister persister; + + @Rule public TemporaryFolder folder = new TemporaryFolder(); + + private FakeConfiguration fakeConfiguration; + + @Before + public void setUp() { + Injector injector = Guice.createInjector(new TestModule()); + fakeConfiguration = (FakeConfiguration) injector.getInstance(IConfiguration.class); + fakeConfiguration.fakeProperties.put("priam_test_config", folder.getRoot().getPath()); + + if (persister == null) persister = injector.getInstance(PriamConfigurationPersister.class); + } + + @After + public void cleanUp() { + fakeConfiguration.fakeProperties.clear(); + } + + @Test + @SuppressWarnings("unchecked") + public void execute() throws Exception { + Path structuredJson = Paths.get(folder.getRoot().getPath(), "structured.json"); + + persister.execute(); + assertTrue(structuredJson.toFile().exists()); + + ObjectMapper objectMapper = new ObjectMapper(); + Map myMap = + objectMapper.readValue(Files.readAllBytes(structuredJson), HashMap.class); + assertEquals(myMap.get("backupLocation"), fakeConfiguration.getBackupLocation()); + } + + @Test + public void getTimer() { + assertEquals( + "0 * * * * ? 
*", + PriamConfigurationPersister.getTimer(fakeConfiguration).getCronExpression()); + } +} diff --git a/priam/src/test/java/com/netflix/priam/configSource/AbstractConfigSourceTest.java b/priam/src/test/java/com/netflix/priam/configSource/AbstractConfigSourceTest.java index 536f8c703..6175a284d 100644 --- a/priam/src/test/java/com/netflix/priam/configSource/AbstractConfigSourceTest.java +++ b/priam/src/test/java/com/netflix/priam/configSource/AbstractConfigSourceTest.java @@ -18,20 +18,18 @@ package com.netflix.priam.configSource; import com.google.common.collect.ImmutableList; +import java.util.List; import org.junit.Assert; import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.List; - -public final class AbstractConfigSourceTest -{ - private static final Logger LOGGER = LoggerFactory.getLogger(AbstractConfigSourceTest.class.getName()); +public final class AbstractConfigSourceTest { + private static final Logger LOGGER = + LoggerFactory.getLogger(AbstractConfigSourceTest.class.getName()); @Test - public void lists() - { + public void lists() { AbstractConfigSource source = new MemoryConfigSource(); source.set("foo", "bar,baz, qux "); final List values = source.getList("foo"); @@ -40,8 +38,7 @@ public void lists() } @Test - public void oneItem() - { + public void oneItem() { AbstractConfigSource source = new MemoryConfigSource(); source.set("foo", "bar"); final List values = source.getList("foo"); @@ -50,8 +47,7 @@ public void oneItem() } @Test - public void oneItemWithSpace() - { + public void oneItemWithSpace() { AbstractConfigSource source = new MemoryConfigSource(); source.set("foo", "\tbar "); final List values = source.getList("foo"); diff --git a/priam/src/test/java/com/netflix/priam/configSource/CompositeConfigSourceTest.java b/priam/src/test/java/com/netflix/priam/configSource/CompositeConfigSourceTest.java index b6ec25be9..40759df61 100644 --- a/priam/src/test/java/com/netflix/priam/configSource/CompositeConfigSourceTest.java +++ b/priam/src/test/java/com/netflix/priam/configSource/CompositeConfigSourceTest.java @@ -22,16 +22,15 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -public final class CompositeConfigSourceTest -{ - private static final Logger LOGGER = LoggerFactory.getLogger(CompositeConfigSourceTest.class.getName()); +public final class CompositeConfigSourceTest { + private static final Logger LOGGER = + LoggerFactory.getLogger(CompositeConfigSourceTest.class.getName()); @Test - public void read() - { + public void read() { MemoryConfigSource memoryConfigSource = new MemoryConfigSource(); IConfigSource configSource = new CompositeConfigSource(memoryConfigSource); - configSource.intialize("foo", "bar"); + configSource.initialize("foo", "bar"); Assert.assertEquals(0, configSource.size()); configSource.set("foo", "bar"); @@ -44,8 +43,7 @@ public void read() } @Test - public void readMultiple() - { + public void readMultiple() { MemoryConfigSource m1 = new MemoryConfigSource(); m1.set("foo", "foo"); MemoryConfigSource m2 = new MemoryConfigSource(); diff --git a/priam/src/test/java/com/netflix/priam/configSource/PropertiesConfigSourceTest.java b/priam/src/test/java/com/netflix/priam/configSource/PropertiesConfigSourceTest.java index 91ab4a759..d78b26ca9 100644 --- a/priam/src/test/java/com/netflix/priam/configSource/PropertiesConfigSourceTest.java +++ b/priam/src/test/java/com/netflix/priam/configSource/PropertiesConfigSourceTest.java @@ -22,40 +22,35 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - 
-public final class PropertiesConfigSourceTest -{ - private static final Logger LOGGER = LoggerFactory.getLogger(PropertiesConfigSourceTest.class.getName()); +public final class PropertiesConfigSourceTest { + private static final Logger LOGGER = + LoggerFactory.getLogger(PropertiesConfigSourceTest.class.getName()); @Test - public void readFile() - { + public void readFile() { PropertiesConfigSource configSource = new PropertiesConfigSource("conf/Priam.properties"); - configSource.intialize("asgName", "region"); + configSource.initialize("asgName", "region"); - Assert.assertEquals("\"/tmp/commitlog\"", configSource.get("Priam.backup.commitlog.location")); - Assert.assertEquals(7102, configSource.get("Priam.thrift.port", 0)); - // File has 13 lines, but line 6 is "Priam.jmx.port7501", so it gets filtered out with empty string check. - Assert.assertEquals(12, configSource.size()); + Assert.assertEquals( + "\"/tmp/commitlog\"", configSource.get("Priam.backup.commitlog.location")); + // File has 13 lines, but line 6 is "Priam.jmx.port7501", so it gets filtered out with empty + // string check. + Assert.assertEquals(11, configSource.size()); } @Test - public void updateKey() - { + public void updateKey() { PropertiesConfigSource configSource = new PropertiesConfigSource("conf/Priam.properties"); - configSource.intialize("asgName", "region"); + configSource.initialize("asgName", "region"); - // File has 13 lines, but line 6 is "Priam.jmx.port7501", so it gets filtered out with empty string check. - Assert.assertEquals(12, configSource.size()); + // File has 12 lines, but line 6 is "Priam.jmx.port7501", so it gets filtered out with empty + // string check. + Assert.assertEquals(11, configSource.size()); configSource.set("foo", "bar"); - Assert.assertEquals(13, configSource.size()); + Assert.assertEquals(12, configSource.size()); Assert.assertEquals("bar", configSource.get("foo")); - - Assert.assertEquals(7102, configSource.get("Priam.thrift.port", 0)); - configSource.set("Priam.thrift.port", Integer.toString(10)); - Assert.assertEquals(10, configSource.get("Priam.thrift.port", 0)); } } diff --git a/priam/src/test/java/com/netflix/priam/configSource/SystemPropertiesConfigSourceTest.java b/priam/src/test/java/com/netflix/priam/configSource/SystemPropertiesConfigSourceTest.java index 77584e437..7def39eef 100644 --- a/priam/src/test/java/com/netflix/priam/configSource/SystemPropertiesConfigSourceTest.java +++ b/priam/src/test/java/com/netflix/priam/configSource/SystemPropertiesConfigSourceTest.java @@ -22,16 +22,15 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -public final class SystemPropertiesConfigSourceTest -{ - private static final Logger LOGGER = LoggerFactory.getLogger(SystemPropertiesConfigSourceTest.class.getName()); +public final class SystemPropertiesConfigSourceTest { + private static final Logger LOGGER = + LoggerFactory.getLogger(SystemPropertiesConfigSourceTest.class.getName()); @Test - public void read() - { + public void read() { final String key = "java.version"; SystemPropertiesConfigSource configSource = new SystemPropertiesConfigSource(); - configSource.intialize("asgName", "region"); + configSource.initialize("asgName", "region"); // sys props are filtered to starting with priam, so this should be missing. 
 Assert.assertEquals(null, configSource.get(key));
diff --git a/priam/src/test/java/com/netflix/priam/connection/TestCassandraOperations.java b/priam/src/test/java/com/netflix/priam/connection/TestCassandraOperations.java
new file mode 100644
index 000000000..89c9e5e59
--- /dev/null
+++ b/priam/src/test/java/com/netflix/priam/connection/TestCassandraOperations.java
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2019 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.netflix.priam.connection;
+
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+import com.mchange.io.FileUtils;
+import com.netflix.priam.backup.BRTestModule;
+import com.netflix.priam.config.IConfiguration;
+import java.io.File;
+import java.util.List;
+import java.util.Map;
+import mockit.Expectations;
+import mockit.Mock;
+import mockit.MockUp;
+import mockit.Mocked;
+import org.apache.cassandra.tools.NodeProbe;
+import org.junit.Assert;
+import org.junit.Test;
+
+/** Created by aagrawal on 3/1/19. */
+public class TestCassandraOperations {
+    private final String gossipInfo1 = "src/test/resources/gossipInfoSample_1.txt";
+    @Mocked private NodeProbe nodeProbe;
+    @Mocked private JMXNodeTool jmxNodeTool;
+    private static CassandraOperations cassandraOperations;
+
+    public TestCassandraOperations() {
+        new MockUp<JMXNodeTool>() {
+            @Mock
+            NodeProbe instance(IConfiguration config) {
+                return nodeProbe;
+            }
+        };
+        Injector injector = Guice.createInjector(new BRTestModule());
+        if (cassandraOperations == null)
+            cassandraOperations = injector.getInstance(CassandraOperations.class);
+    }
+
+    @Test
+    public void testGossipInfo() throws Exception {
+
+        String gossipInfoFromNodetool = FileUtils.getContentsAsString(new File(gossipInfo1));
+        new Expectations() {
+            {
+                nodeProbe.getGossipInfo(false);
+                result = gossipInfoFromNodetool;
+                nodeProbe.getTokens("127.0.0.1");
+                result = "123,234";
+            }
+        };
+        List<Map<String, String>> gossipInfoList = cassandraOperations.gossipInfo();
+        System.out.println(gossipInfoList);
+        Assert.assertEquals(7, gossipInfoList.size());
+        gossipInfoList
+                .stream()
+                .forEach(
+                        gossipInfo -> {
+                            Assert.assertEquals("us-east", gossipInfo.get("DC"));
+                            Assert.assertNotNull(gossipInfo.get("PUBLIC_IP"));
+                            Assert.assertEquals("1565153", gossipInfo.get("HEARTBEAT"));
+                            if (gossipInfo.get("STATUS").equalsIgnoreCase("NORMAL"))
+                                Assert.assertNotNull(gossipInfo.get("TOKENS"));
+                            if (gossipInfo.get("PUBLIC_IP").equalsIgnoreCase("127.0.0.1"))
+                                Assert.assertEquals("[123,234]", gossipInfo.get("TOKENS"));
+                        });
+    }
+}
diff --git a/priam/src/test/java/com/netflix/priam/defaultimpl/CassandraProcessManagerTest.java b/priam/src/test/java/com/netflix/priam/defaultimpl/CassandraProcessManagerTest.java
index a34e5c760..dc7116923 100644
--- a/priam/src/test/java/com/netflix/priam/defaultimpl/CassandraProcessManagerTest.java
+++ b/priam/src/test/java/com/netflix/priam/defaultimpl/CassandraProcessManagerTest.java
@@ -18,54 +18,45 @@ package com.netflix.priam.defaultimpl;
 
 import com.google.inject.Guice;
+import
com.netflix.priam.backup.BRTestModule; import com.netflix.priam.config.FakeConfiguration; import com.netflix.priam.config.IConfiguration; -import com.netflix.priam.backup.BRTestModule; import com.netflix.priam.health.InstanceState; import com.netflix.priam.merics.CassMonitorMetrics; +import java.io.IOException; import org.junit.Assert; import org.junit.Before; import org.junit.Test; -import java.io.IOException; - -public class CassandraProcessManagerTest -{ +public class CassandraProcessManagerTest { CassandraProcessManager cpm; @Before - public void setup() - { - IConfiguration config = new FakeConfiguration("us-east-1", "test_cluster", "us-east-1a", "i-2378afd3"); - InstanceState instanceState = Guice.createInjector(new BRTestModule()).getInstance(InstanceState.class); - CassMonitorMetrics cassMonitorMetrics = Guice.createInjector(new BRTestModule()).getInstance(CassMonitorMetrics.class); + public void setup() { + IConfiguration config = new FakeConfiguration("test_cluster"); + InstanceState instanceState = + Guice.createInjector(new BRTestModule()).getInstance(InstanceState.class); + CassMonitorMetrics cassMonitorMetrics = + Guice.createInjector(new BRTestModule()).getInstance(CassMonitorMetrics.class); cpm = new CassandraProcessManager(config, instanceState, cassMonitorMetrics); } @Test - public void logProcessOutput_BadApp() throws IOException, InterruptedException - { + public void logProcessOutput_BadApp() throws IOException, InterruptedException { Process p = null; - try - { + try { p = new ProcessBuilder("ls", "/tmppppp").start(); int exitValue = p.waitFor(); Assert.assertTrue(0 != exitValue); cpm.logProcessOutput(p); - } - catch(IOException ioe) - { - if(p!=null) - cpm.logProcessOutput(p); + } catch (IOException ioe) { + if (p != null) cpm.logProcessOutput(p); } } - /** - * note: this will succeed on a *nix machine, unclear about anything else... - */ + /** note: this will succeed on a *nix machine, unclear about anything else... */ @Test - public void logProcessOutput_GoodApp() throws IOException, InterruptedException - { + public void logProcessOutput_GoodApp() throws IOException, InterruptedException { Process p = new ProcessBuilder("true").start(); int exitValue = p.waitFor(); Assert.assertEquals(0, exitValue); diff --git a/priam/src/test/java/com/netflix/priam/defaultimpl/FakeCassandraProcess.java b/priam/src/test/java/com/netflix/priam/defaultimpl/FakeCassandraProcess.java index 12dd386ff..e123169d1 100644 --- a/priam/src/test/java/com/netflix/priam/defaultimpl/FakeCassandraProcess.java +++ b/priam/src/test/java/com/netflix/priam/defaultimpl/FakeCassandraProcess.java @@ -1,19 +1,33 @@ +/* + * Copyright 2017 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ package com.netflix.priam.defaultimpl; import java.io.IOException; -/** - * Created by aagrawal on 10/3/17. - */ +/** Created by aagrawal on 10/3/17. 
*/ public class FakeCassandraProcess implements ICassandraProcess { @Override public void start(boolean join_ring) throws IOException { - //do nothing + // do nothing } @Override public void stop(boolean force) throws IOException { - //do nothing + // do nothing } -} \ No newline at end of file +} diff --git a/priam/src/test/java/com/netflix/priam/utils/TestCassandraMonitor.java b/priam/src/test/java/com/netflix/priam/health/TestCassandraMonitor.java similarity index 68% rename from priam/src/test/java/com/netflix/priam/utils/TestCassandraMonitor.java rename to priam/src/test/java/com/netflix/priam/health/TestCassandraMonitor.java index 4539bd35e..4bf3fa61c 100644 --- a/priam/src/test/java/com/netflix/priam/utils/TestCassandraMonitor.java +++ b/priam/src/test/java/com/netflix/priam/health/TestCassandraMonitor.java @@ -1,5 +1,5 @@ /* - * Copyright 2017 Netflix, Inc. + * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -15,46 +15,40 @@ * */ -package com.netflix.priam.utils; +package com.netflix.priam.health; import com.google.inject.Guice; import com.google.inject.Injector; -import com.netflix.priam.defaultimpl.ICassandraProcess; -import com.netflix.priam.config.IConfiguration; import com.netflix.priam.backup.BRTestModule; -import com.netflix.priam.health.InstanceState; +import com.netflix.priam.config.IConfiguration; +import com.netflix.priam.connection.JMXNodeTool; +import com.netflix.priam.defaultimpl.ICassandraProcess; import com.netflix.priam.merics.CassMonitorMetrics; -import org.junit.Assert; +import java.io.ByteArrayInputStream; +import java.io.InputStream; import mockit.*; import org.apache.cassandra.tools.NodeProbe; +import org.junit.Assert; import org.junit.Before; import org.junit.Test; -import java.io.ByteArrayInputStream; -import java.io.InputStream; - -/** - * Created by aagrawal on 7/18/17. - */ +/** Created by aagrawal on 7/18/17. 
*/ public class TestCassandraMonitor { private static CassandraMonitor monitor; private static InstanceState instanceState; private static CassMonitorMetrics cassMonitorMetrics; + private IConfiguration config; - @Mocked - private Process mockProcess; - @Mocked - private NodeProbe nodeProbe; - @Mocked - private ICassandraProcess cassProcess; + @Mocked private Process mockProcess; + @Mocked private NodeProbe nodeProbe; + @Mocked private ICassandraProcess cassProcess; @Before public void setUp() { Injector injector = Guice.createInjector(new BRTestModule()); config = injector.getInstance(IConfiguration.class); - if (instanceState == null) - instanceState = injector.getInstance(InstanceState.class); + if (instanceState == null) instanceState = injector.getInstance(InstanceState.class); if (cassMonitorMetrics == null) cassMonitorMetrics = injector.getInstance(CassMonitorMetrics.class); @@ -78,26 +72,34 @@ public void testCassandraMonitor() throws Exception { @Test public void testNoAutoRemediation() throws Exception { - new MockUp<JMXNodeTool>() - { + new MockUp<JMXNodeTool>() { @Mock NodeProbe instance(IConfiguration config) { return nodeProbe; } }; final InputStream mockOutput = new ByteArrayInputStream("a process".getBytes()); - new Expectations() {{ - mockProcess.getInputStream(); result= mockOutput; - nodeProbe.isGossipRunning(); result=true; - nodeProbe.isNativeTransportRunning(); result=true; - nodeProbe.isThriftServerRunning(); result=true; - }}; + new Expectations() { + { + mockProcess.getInputStream(); + result = mockOutput; + nodeProbe.isGossipRunning(); + result = true; + nodeProbe.isNativeTransportRunning(); + result = true; + } + }; // Mock out the ps call final Runtime r = Runtime.getRuntime(); - String[] cmd = { "/bin/sh", "-c", "ps -ef |grep -v -P \"\\sgrep\\s\" | grep " + config.getCassProcessName()}; + String[] cmd = { + "/bin/sh", + "-c", + "ps -ef |grep -v -P \"\\sgrep\\s\" | grep " + config.getCassProcessName() + }; new Expectations(r) { { - r.exec(cmd); result=mockProcess; + r.exec(cmd); + result = mockProcess; } }; instanceState.setShouldCassandraBeAlive(false); @@ -108,7 +110,10 @@ NodeProbe instance(IConfiguration config) { Assert.assertTrue(!instanceState.shouldCassandraBeAlive()); Assert.assertTrue(instanceState.isCassandraProcessAlive()); new Verifications() { - { cassProcess.start(anyBoolean); times=0; } + { + cassProcess.start(anyBoolean); + times = 0; + } }; } @@ -117,17 +122,27 @@ public void testAutoRemediationRateLimit() throws Exception { final InputStream mockOutput = new ByteArrayInputStream("".getBytes()); instanceState.setShouldCassandraBeAlive(true); instanceState.markLastAttemptedStartTime(); - new Expectations() {{ - // 6 calls to execute should = 12 calls to getInputStream(); - mockProcess.getInputStream(); result=mockOutput; times=12; - cassProcess.start(true); times=2; - }}; + new Expectations() { + { + // 6 calls to execute should = 12 calls to getInputStream(); + mockProcess.getInputStream(); + result = mockOutput; + times = 12; + cassProcess.start(true); + times = 2; + } + }; // Mock out the ps call final Runtime r = Runtime.getRuntime(); - String[] cmd = { "/bin/sh", "-c", "ps -ef |grep -v -P \"\\sgrep\\s\" | grep " + config.getCassProcessName()}; + String[] cmd = { + "/bin/sh", + "-c", + "ps -ef |grep -v -P \"\\sgrep\\s\" | grep " + config.getCassProcessName() + }; new Expectations(r) { { - r.exec(cmd); result=mockProcess; + r.exec(cmd); + result = mockProcess; } }; // Sleep ahead to ensure we have permits in the rate limiter diff --git 
a/priam/src/test/java/com/netflix/priam/health/TestInstanceStatus.java b/priam/src/test/java/com/netflix/priam/health/TestInstanceStatus.java index af9580092..c5839328e 100644 --- a/priam/src/test/java/com/netflix/priam/health/TestInstanceStatus.java +++ b/priam/src/test/java/com/netflix/priam/health/TestInstanceStatus.java @@ -1,3 +1,19 @@ +/* + * Copyright 2017 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ package com.netflix.priam.health; import com.google.inject.Guice; @@ -8,10 +24,7 @@ import org.junit.Before; import org.junit.Test; -/** - * Test InstanceState - * Created by aagrawal on 9/22/17. - */ +/** Test InstanceState Created by aagrawal on 9/22/17. */ public class TestInstanceStatus { private TestInstanceState testInstanceState; @@ -23,47 +36,76 @@ public void setUp() { } @Test - public void testHealth(){ - //Verify good health. - Assert.assertTrue(testInstanceState.setParams(false, true, true, true, true, true, true, true).isHealthy()); - Assert.assertTrue(testInstanceState.setParams(false,true, true, true, true, false, true, true).isHealthy()); - Assert.assertTrue(testInstanceState.setParams(false,true, true, true, false, true, true, true).isHealthy()); - Assert.assertTrue(testInstanceState.setParams(true,false, true, true, false, true, true, true).isHealthy()); - Assert.assertTrue(testInstanceState.setParams(true,true, false, true, true, true, true, true).isHealthy()); - Assert.assertTrue(testInstanceState.setParams(true,true, true, false, true, true, true, true).isHealthy()); - Assert.assertTrue(testInstanceState.setParams(true,true, true, true, true, true, false, true).isHealthy()); - Assert.assertTrue(testInstanceState.setParams(true,true, true, true, false, false, true, true).isHealthy()); - - //Negative health case scenarios. - Assert.assertFalse(testInstanceState.setParams(false,false, true, true, false, true, true, true).isHealthy()); - Assert.assertFalse(testInstanceState.setParams(false,true, false, true, true, true, true, true).isHealthy()); - Assert.assertFalse(testInstanceState.setParams(false,true, true, false, true, true, true, true).isHealthy()); - Assert.assertFalse(testInstanceState.setParams(false,true, true, true, true, true, false, true).isHealthy()); - Assert.assertFalse(testInstanceState.setParams(false,true, true, true, false, false, true, true).isHealthy()); - Assert.assertFalse(testInstanceState.setParams(false,true, true, true, false, false, true, false).isHealthy()); + public void testHealth() { + // Verify good health. 
+ Assert.assertTrue( + testInstanceState.setParams(false, true, true, true, true, true, true).isHealthy()); + Assert.assertTrue( + testInstanceState.setParams(false, true, true, true, true, true, true).isHealthy()); + Assert.assertTrue( + testInstanceState.setParams(false, true, true, true, true, true, true).isHealthy()); + Assert.assertTrue( + testInstanceState.setParams(true, false, true, true, true, true, true).isHealthy()); + Assert.assertTrue( + testInstanceState.setParams(true, true, false, true, true, true, true).isHealthy()); + Assert.assertTrue( + testInstanceState.setParams(true, true, true, false, true, true, true).isHealthy()); + Assert.assertTrue( + testInstanceState.setParams(true, true, true, true, true, false, true).isHealthy()); + Assert.assertTrue( + testInstanceState.setParams(true, true, true, true, false, true, true).isHealthy()); + // Negative health case scenarios. + Assert.assertFalse( + testInstanceState + .setParams(false, false, true, true, true, true, true) + .isHealthy()); + Assert.assertFalse( + testInstanceState + .setParams(false, true, false, true, true, true, true) + .isHealthy()); + Assert.assertFalse( + testInstanceState + .setParams(false, true, true, false, true, true, true) + .isHealthy()); + Assert.assertFalse( + testInstanceState + .setParams(false, true, true, true, true, false, true) + .isHealthy()); + Assert.assertFalse( + testInstanceState + .setParams(false, true, true, true, false, true, true) + .isHealthy()); + Assert.assertFalse( + testInstanceState + .setParams(false, true, true, true, false, true, false) + .isHealthy()); } - private class TestInstanceState{ - private InstanceState instanceState; + private class TestInstanceState { + private final InstanceState instanceState; - TestInstanceState(InstanceState instanceState1){ + TestInstanceState(InstanceState instanceState1) { this.instanceState = instanceState1; } - InstanceState setParams(boolean isRestoring, boolean isYmlWritten, boolean isCassandraProcessAlive, boolean isGossipEnabled, boolean isThriftEnabled, boolean isNativeEnabled, boolean isRequiredDirectoriesExist, boolean shouldCassandraBeAlive){ + InstanceState setParams( + boolean isRestoring, + boolean isYmlWritten, + boolean isCassandraProcessAlive, + boolean isGossipEnabled, + boolean isNativeEnabled, + boolean isRequiredDirectoriesExist, + boolean shouldCassandraBeAlive) { instanceState.setYmlWritten(isYmlWritten); instanceState.setCassandraProcessAlive(isCassandraProcessAlive); instanceState.setIsNativeTransportActive(isNativeEnabled); - instanceState.setIsThriftActive(isThriftEnabled); instanceState.setIsGossipActive(isGossipEnabled); instanceState.setIsRequiredDirectoriesExist(isRequiredDirectoriesExist); instanceState.setShouldCassandraBeAlive(shouldCassandraBeAlive); - if (isRestoring) - instanceState.setRestoreStatus(Status.STARTED); - else - instanceState.setRestoreStatus(Status.FINISHED); + if (isRestoring) instanceState.setRestoreStatus(Status.STARTED); + else instanceState.setRestoreStatus(Status.FINISHED); return instanceState; } diff --git a/priam/src/test/java/com/netflix/priam/identity/FakeMembership.java b/priam/src/test/java/com/netflix/priam/identity/FakeMembership.java index bcdd948fc..b320442d4 100644 --- a/priam/src/test/java/com/netflix/priam/identity/FakeMembership.java +++ b/priam/src/test/java/com/netflix/priam/identity/FakeMembership.java @@ -17,76 +17,60 @@ package com.netflix.priam.identity; +import com.google.common.collect.ImmutableSet; import java.util.Collection; +import java.util.HashSet; 
import java.util.List; +import java.util.Set; -import com.netflix.priam.identity.IMembership; +public class FakeMembership implements IMembership { -public class FakeMembership implements IMembership -{ + private ImmutableSet<String> instances; + private Set<String> acl; - private List<String> instances; - - public FakeMembership(List<String> priamInstances) - { - this.instances = priamInstances; - } - - public void setInstances( List<String> priamInstances) - { - this.instances = priamInstances; + public FakeMembership(List<String> priamInstances) { + this.instances = ImmutableSet.copyOf(priamInstances); + this.acl = new HashSet<>(); } @Override - public List<String> getRacMembership() - { + public ImmutableSet<String> getRacMembership() { return instances; } - + @Override - public List<String> getCrossAccountRacMembership() - { - return null; + public ImmutableSet<String> getCrossAccountRacMembership() { + return null; } - @Override - public int getRacMembershipSize() - { + public int getRacMembershipSize() { return 3; } @Override - public int getRacCount() - { + public int getRacCount() { return 3; } @Override - public void addACL(Collection<String> listIPs, int from, int to) - { - // TODO Auto-generated method stub - + public void addACL(Collection<String> listIPs, int from, int to) { + acl.addAll(listIPs); } @Override - public void removeACL(Collection<String> listIPs, int from, int to) - { - // TODO Auto-generated method stub - + public void removeACL(Collection<String> listIPs, int from, int to) { + acl.removeAll(listIPs); } @Override - public List<String> listACL(int from, int to) - { - // TODO Auto-generated method stub - return null; + public ImmutableSet<String> listACL(int from, int to) { + return ImmutableSet.copyOf(acl); } @Override - public void expandRacMembership(int count) - { + public void expandRacMembership(int count) { // TODO Auto-generated method stub - + } } diff --git a/priam/src/test/java/com/netflix/priam/identity/FakePriamInstanceFactory.java b/priam/src/test/java/com/netflix/priam/identity/FakePriamInstanceFactory.java index 5e8c83326..45b826b78 100644 --- a/priam/src/test/java/com/netflix/priam/identity/FakePriamInstanceFactory.java +++ b/priam/src/test/java/com/netflix/priam/identity/FakePriamInstanceFactory.java @@ -17,39 +17,52 @@ package com.netflix.priam.identity; +import com.google.common.collect.ImmutableSet; import com.google.common.collect.Maps; import com.google.inject.Inject; -import com.netflix.priam.config.IConfiguration; -import com.netflix.priam.identity.IPriamInstanceFactory; -import com.netflix.priam.identity.PriamInstance; +import com.netflix.priam.identity.config.InstanceInfo; +import groovy.lang.Singleton; +import java.util.Comparator; +import java.util.Map; +import java.util.stream.Collectors; -import java.util.*; - -public class FakePriamInstanceFactory implements IPriamInstanceFactory -{ - private final Map<Integer, PriamInstance> instances = Maps.newHashMap(); - private final IConfiguration config; +@Singleton +public class FakePriamInstanceFactory implements IPriamInstanceFactory { + private final Map<Integer, PriamInstance> instances = Maps.newHashMap(); + private final InstanceInfo instanceInfo; @Inject - public FakePriamInstanceFactory(IConfiguration config) - { - this.config = config; + public FakePriamInstanceFactory(InstanceInfo instanceInfo) { + this.instanceInfo = instanceInfo; } @Override - public List<PriamInstance> getAllIds(String appName) - { - return new ArrayList<>(instances.values()); + public ImmutableSet<PriamInstance> getAllIds(String appName) { + return appName.endsWith("-dead") + ? 
ImmutableSet.of() + : ImmutableSet.copyOf( + instances + .values() + .stream() + .sorted(Comparator.comparingInt(PriamInstance::getId)) + .collect(Collectors.toList())); } - + @Override public PriamInstance getInstance(String appName, String dc, int id) { - return instances.get(id); + return instances.get(id); } @Override - public PriamInstance create(String app, int id, String instanceID, String hostname, String ip, String rac, Map<String, Object> volumes, String payload) - { + public PriamInstance create( + String app, + int id, + String instanceID, + String hostname, + String ip, + String rac, + Map<String, Object> volumes, + String payload) { PriamInstance ins = new PriamInstance(); ins.setApp(app); ins.setRac(rac); @@ -58,46 +71,18 @@ public PriamInstance create(String app, int id, String instanceID, String hostna ins.setInstanceId(instanceID); ins.setToken(payload); ins.setVolumes(volumes); - ins.setDC(config.getDC()); + ins.setDC(instanceInfo.getRegion()); instances.put(id, ins); return ins; } @Override - public void delete(PriamInstance inst) - { + public void delete(PriamInstance inst) { instances.remove(inst.getId()); } @Override - public void update(PriamInstance inst) - { + public void update(PriamInstance orig, PriamInstance inst) { instances.put(inst.getId(), inst); } - - - @Override - public void sort(List<PriamInstance> return_) - { - Comparator<PriamInstance> comparator = new Comparator<PriamInstance>() - { - - @Override - public int compare(PriamInstance o1, PriamInstance o2) - { - Integer c1 = o1.getId(); - Integer c2 = o2.getId(); - return c1.compareTo(c2); - } - }; - Collections.sort(return_, comparator); - } - - @Override - public void attachVolumes(PriamInstance instance, String mountPath, String device) - { - // TODO Auto-generated method stub - } - - } diff --git a/priam/src/test/java/com/netflix/priam/identity/config/FakeInstanceInfo.java b/priam/src/test/java/com/netflix/priam/identity/config/FakeInstanceInfo.java new file mode 100644 index 000000000..54b269dcf --- /dev/null +++ b/priam/src/test/java/com/netflix/priam/identity/config/FakeInstanceInfo.java @@ -0,0 +1,97 @@ +/* + * Copyright 2018 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.priam.identity.config; + +/** Created by aagrawal on 10/17/18. 
*/ +public class FakeInstanceInfo implements InstanceInfo { + private String instanceId; + private String availabilityZone; + private String region; + private String instanceType; + private String asg; + private String vpcId; + + public FakeInstanceInfo(String instanceId, String availabilityZone, String region) { + this(instanceId, availabilityZone, region, "i2.xlarge", availabilityZone, ""); + } + + public FakeInstanceInfo( + String instanceId, + String availabilityZone, + String region, + String instanceType, + String asg, + String vpcId) { + this.instanceId = instanceId; + this.availabilityZone = availabilityZone; + this.region = region; + this.instanceType = instanceType; + this.asg = asg; + this.vpcId = vpcId; + } + + @Override + public String getRac() { + return availabilityZone; + } + + @Override + public String getHostname() { + return instanceId; + } + + @Override + public String getHostIP() { + return "127.0.0.0"; + } + + @Override + public String getPrivateIP() { + return "127.1.1.0"; + } + + @Override + public String getInstanceId() { + return instanceId; + } + + @Override + public String getInstanceType() { + return instanceType; + } + + @Override + public String getVpcId() { + return vpcId; + } + + @Override + public String getRegion() { + return region; + } + + @Override + public String getAutoScalingGroup() { + return asg; + } + + @Override + public InstanceEnvironment getInstanceEnvironment() { + return InstanceEnvironment.VPC; + } +} diff --git a/priam/src/test/java/com/netflix/priam/identity/config/TestAWSInstanceInfo.java b/priam/src/test/java/com/netflix/priam/identity/config/TestAWSInstanceInfo.java new file mode 100644 index 000000000..ffc3fd5cf --- /dev/null +++ b/priam/src/test/java/com/netflix/priam/identity/config/TestAWSInstanceInfo.java @@ -0,0 +1,68 @@ +package com.netflix.priam.identity.config; + +import com.google.common.truth.Truth; +import mockit.Expectations; +import org.junit.Before; +import org.junit.Test; + +/** tests of {@link com.netflix.priam.identity.config.AWSInstanceInfo} */ +public class TestAWSInstanceInfo { + private AWSInstanceInfo instanceInfo; + + @Before + public void setUp() { + instanceInfo = + new AWSInstanceInfo( + () -> { + throw new RuntimeException("not implemented"); + }); + } + + @Test + public void testPublicHostIP() { + new Expectations(instanceInfo) { + { + instanceInfo.tryGetDataFromUrl(AWSInstanceInfo.PUBLIC_HOSTIP_URL); + result = "1.2.3.4"; + } + }; + Truth.assertThat(instanceInfo.getHostIP()).isEqualTo("1.2.3.4"); + } + + @Test + public void testMissingPublicHostIP() { + new Expectations(instanceInfo) { + { + instanceInfo.tryGetDataFromUrl(AWSInstanceInfo.PUBLIC_HOSTIP_URL); + result = null; + instanceInfo.tryGetDataFromUrl(AWSInstanceInfo.LOCAL_HOSTIP_URL); + result = "1.2.3.4"; + } + }; + Truth.assertThat(instanceInfo.getHostIP()).isEqualTo("1.2.3.4"); + } + + @Test + public void testPublicHostname() { + new Expectations(instanceInfo) { + { + instanceInfo.tryGetDataFromUrl(AWSInstanceInfo.PUBLIC_HOSTNAME_URL); + result = "hostname"; + } + }; + Truth.assertThat(instanceInfo.getHostname()).isEqualTo("hostname"); + } + + @Test + public void testMissingPublicHostname() { + new Expectations(instanceInfo) { + { + instanceInfo.tryGetDataFromUrl(AWSInstanceInfo.PUBLIC_HOSTNAME_URL); + result = null; + instanceInfo.tryGetDataFromUrl(AWSInstanceInfo.LOCAL_HOSTNAME_URL); + result = "hostname"; + } + }; + Truth.assertThat(instanceInfo.getHostname()).isEqualTo("hostname"); + } +} diff --git 
a/priam/src/test/java/com/netflix/priam/identity/token/AssignedTokenRetrieverTest.java b/priam/src/test/java/com/netflix/priam/identity/token/AssignedTokenRetrieverTest.java new file mode 100644 index 000000000..94c02cb79 --- /dev/null +++ b/priam/src/test/java/com/netflix/priam/identity/token/AssignedTokenRetrieverTest.java @@ -0,0 +1,366 @@ +package com.netflix.priam.identity.token; + +import com.google.common.base.Strings; +import com.google.common.collect.ImmutableSet; +import com.google.common.truth.Truth; +import com.netflix.priam.config.IConfiguration; +import com.netflix.priam.identity.IMembership; +import com.netflix.priam.identity.IPriamInstanceFactory; +import com.netflix.priam.identity.InstanceIdentity; +import com.netflix.priam.identity.PriamInstance; +import com.netflix.priam.identity.config.InstanceInfo; +import com.netflix.priam.utils.ITokenManager; +import com.netflix.priam.utils.Sleeper; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import mockit.Expectations; +import mockit.Mocked; +import org.junit.Test; +import org.junit.jupiter.api.Assertions; + +public class AssignedTokenRetrieverTest { + public static final String APP = "testapp"; + public static final String DEAD_APP = "testapp-dead"; + + @Test + public void grabAssignedTokenStartDbInBootstrapModeWhenGossipAgreesCurrentInstanceIsTokenOwner( + @Mocked IPriamInstanceFactory factory, + @Mocked IConfiguration config, + @Mocked IMembership membership, + @Mocked Sleeper sleeper, + @Mocked ITokenManager tokenManager, + @Mocked InstanceInfo instanceInfo, + @Mocked TokenRetrieverUtils retrievalUtils) + throws Exception { + List liveHosts = newPriamInstances(); + Collections.shuffle(liveHosts); + + TokenRetrieverUtils.InferredTokenOwnership inferredTokenOwnership = + new TokenRetrieverUtils.InferredTokenOwnership(); + inferredTokenOwnership.setTokenInformationStatus( + TokenRetrieverUtils.InferredTokenOwnership.TokenInformationStatus.GOOD); + inferredTokenOwnership.setTokenInformation( + new TokenRetrieverUtils.TokenInformation(liveHosts.get(0).getHostIP(), false)); + + new Expectations() { + { + config.getAppName(); + result = APP; + + factory.getAllIds(DEAD_APP); + result = ImmutableSet.of(); + + factory.getAllIds(APP); + result = ImmutableSet.copyOf(liveHosts); + + instanceInfo.getInstanceId(); + result = liveHosts.get(0).getInstanceId(); + + instanceInfo.getHostIP(); + result = liveHosts.get(0).getHostIP(); + + TokenRetrieverUtils.inferTokenOwnerFromGossip( + ImmutableSet.copyOf(liveHosts), + liveHosts.get(0).getToken(), + liveHosts.get(0).getDC()); + result = inferredTokenOwnership; + } + }; + + ITokenRetriever tokenRetriever = + new TokenRetriever( + factory, membership, config, instanceInfo, sleeper, tokenManager); + InstanceIdentity instanceIdentity = + new InstanceIdentity(factory, membership, config, instanceInfo, tokenRetriever); + Truth.assertThat(instanceIdentity.isReplace()).isFalse(); + } + + @Test + public void grabAssignedTokenStartDbInReplaceModeWhenGossipAgreesPreviousTokenOwnerIsNotLive( + @Mocked IPriamInstanceFactory factory, + @Mocked IConfiguration config, + @Mocked IMembership membership, + @Mocked Sleeper sleeper, + @Mocked ITokenManager tokenManager, + @Mocked InstanceInfo instanceInfo, + @Mocked TokenRetrieverUtils retrievalUtils) + throws Exception { + List liveHosts = newPriamInstances(); + Collections.shuffle(liveHosts); + + PriamInstance deadInstance = liveHosts.remove(0); + 
PriamInstance newInstance = + newMockPriamInstance( + deadInstance.getDC(), + deadInstance.getRac(), + deadInstance.getId(), + String.format("new-fakeInstance-%d", deadInstance.getId()), + String.format("127.1.1.%d", deadInstance.getId() + 100), + String.format("new-fakeHost-%d", deadInstance.getId()), + deadInstance.getToken()); + + // the case we are trying to test is when Priam restarted after it acquired the + // token. new instance is already registered with token database. + liveHosts.add(newInstance); + TokenRetrieverUtils.InferredTokenOwnership inferredTokenOwnership = + new TokenRetrieverUtils.InferredTokenOwnership(); + inferredTokenOwnership.setTokenInformationStatus( + TokenRetrieverUtils.InferredTokenOwnership.TokenInformationStatus.GOOD); + inferredTokenOwnership.setTokenInformation( + new TokenRetrieverUtils.TokenInformation(deadInstance.getHostIP(), false)); + + new Expectations() { + { + config.getAppName(); + result = APP; + + factory.getAllIds(DEAD_APP); + result = ImmutableSet.of(deadInstance); + factory.getAllIds(APP); + result = ImmutableSet.copyOf(liveHosts); + + instanceInfo.getInstanceId(); + result = newInstance.getInstanceId(); + + TokenRetrieverUtils.inferTokenOwnerFromGossip( + ImmutableSet.copyOf(liveHosts), + newInstance.getToken(), + newInstance.getDC()); + result = inferredTokenOwnership; + } + }; + + ITokenRetriever tokenRetriever = + new TokenRetriever( + factory, membership, config, instanceInfo, sleeper, tokenManager); + InstanceIdentity instanceIdentity = + new InstanceIdentity(factory, membership, config, instanceInfo, tokenRetriever); + Truth.assertThat(instanceIdentity.getReplacedIp()).isEqualTo(deadInstance.getHostIP()); + Truth.assertThat(instanceIdentity.isReplace()).isTrue(); + } + + @Test + public void grabAssignedTokenThrowWhenGossipAgreesPreviousTokenOwnerIsLive( + @Mocked IPriamInstanceFactory factory, + @Mocked IConfiguration config, + @Mocked IMembership membership, + @Mocked Sleeper sleeper, + @Mocked ITokenManager tokenManager, + @Mocked InstanceInfo instanceInfo, + @Mocked TokenRetrieverUtils retrievalUtils) { + List liveHosts = newPriamInstances(); + Collections.shuffle(liveHosts); + + PriamInstance deadInstance = liveHosts.remove(0); + PriamInstance newInstance = + newMockPriamInstance( + deadInstance.getDC(), + deadInstance.getRac(), + deadInstance.getId(), + String.format("new-fakeInstance-%d", deadInstance.getId()), + String.format("127.1.1.%d", deadInstance.getId() + 100), + String.format("new-fakeHost-%d", deadInstance.getId()), + deadInstance.getToken()); + + // the case we are trying to test is when Priam restarted after it acquired the + // token. new instance is already registered with token database. 
+ liveHosts.add(newInstance); + TokenRetrieverUtils.InferredTokenOwnership inferredTokenOwnership = + new TokenRetrieverUtils.InferredTokenOwnership(); + inferredTokenOwnership.setTokenInformationStatus( + TokenRetrieverUtils.InferredTokenOwnership.TokenInformationStatus.GOOD); + inferredTokenOwnership.setTokenInformation( + new TokenRetrieverUtils.TokenInformation(deadInstance.getHostIP(), true)); + + new Expectations() { + { + config.getAppName(); + result = APP; + + factory.getAllIds(DEAD_APP); + result = ImmutableSet.of(deadInstance); + factory.getAllIds(APP); + result = ImmutableSet.copyOf(liveHosts); + + instanceInfo.getInstanceId(); + result = newInstance.getInstanceId(); + + TokenRetrieverUtils.inferTokenOwnerFromGossip( + ImmutableSet.copyOf(liveHosts), + newInstance.getToken(), + newInstance.getDC()); + result = inferredTokenOwnership; + } + }; + + ITokenRetriever tokenRetriever = + new TokenRetriever( + factory, membership, config, instanceInfo, sleeper, tokenManager); + Assertions.assertThrows( + TokenRetrieverUtils.GossipParseException.class, + () -> + new InstanceIdentity( + factory, membership, config, instanceInfo, tokenRetriever)); + } + + @Test + public void grabAssignedTokenThrowToBuyTimeWhenGossipDisagreesOnPreviousTokenOwner( + @Mocked IPriamInstanceFactory factory, + @Mocked IConfiguration config, + @Mocked IMembership membership, + @Mocked Sleeper sleeper, + @Mocked ITokenManager tokenManager, + @Mocked InstanceInfo instanceInfo, + @Mocked TokenRetrieverUtils retrievalUtils) { + List liveHosts = newPriamInstances(); + Collections.shuffle(liveHosts); + + TokenRetrieverUtils.InferredTokenOwnership inferredTokenOwnership = + new TokenRetrieverUtils.InferredTokenOwnership(); + inferredTokenOwnership.setTokenInformationStatus( + TokenRetrieverUtils.InferredTokenOwnership.TokenInformationStatus.MISMATCH); + inferredTokenOwnership.setTokenInformation( + new TokenRetrieverUtils.TokenInformation(liveHosts.get(0).getHostIP(), false)); + + new Expectations() { + { + config.getAppName(); + result = APP; + + factory.getAllIds(DEAD_APP); + result = ImmutableSet.of(); + factory.getAllIds(APP); + result = ImmutableSet.copyOf(liveHosts); + + instanceInfo.getInstanceId(); + result = liveHosts.get(0).getInstanceId(); + + TokenRetrieverUtils.inferTokenOwnerFromGossip( + ImmutableSet.copyOf(liveHosts), + liveHosts.get(0).getToken(), + liveHosts.get(0).getDC()); + result = inferredTokenOwnership; + } + }; + + ITokenRetriever tokenRetriever = + new TokenRetriever( + factory, membership, config, instanceInfo, sleeper, tokenManager); + Assertions.assertThrows( + TokenRetrieverUtils.GossipParseException.class, + () -> + new InstanceIdentity( + factory, membership, config, instanceInfo, tokenRetriever)); + } + + @Test + public void grabAssignedTokenStartDbInBootstrapModeWhenGossipDisagreesOnPreviousTokenOwner( + @Mocked IPriamInstanceFactory factory, + @Mocked IConfiguration config, + @Mocked IMembership membership, + @Mocked Sleeper sleeper, + @Mocked ITokenManager tokenManager, + @Mocked InstanceInfo instanceInfo, + @Mocked TokenRetrieverUtils retrievalUtils) + throws Exception { + List liveHosts = newPriamInstances(); + Collections.shuffle(liveHosts); + + TokenRetrieverUtils.InferredTokenOwnership inferredTokenOwnership = + new TokenRetrieverUtils.InferredTokenOwnership(); + inferredTokenOwnership.setTokenInformationStatus( + TokenRetrieverUtils.InferredTokenOwnership.TokenInformationStatus.MISMATCH); + inferredTokenOwnership.setTokenInformation( + new 
TokenRetrieverUtils.TokenInformation(liveHosts.get(0).getHostIP(), false)); + + new Expectations() { + { + config.getAppName(); + result = APP; + config.permitDirectTokenAssignmentWithGossipMismatch(); + result = true; + + factory.getAllIds(DEAD_APP); + result = ImmutableSet.of(); + factory.getAllIds(APP); + result = ImmutableSet.copyOf(liveHosts); + + instanceInfo.getInstanceId(); + result = liveHosts.get(0).getInstanceId(); + + TokenRetrieverUtils.inferTokenOwnerFromGossip( + ImmutableSet.copyOf(liveHosts), + liveHosts.get(0).getToken(), + liveHosts.get(0).getDC()); + result = inferredTokenOwnership; + } + }; + + ITokenRetriever tokenRetriever = + new TokenRetriever( + factory, membership, config, instanceInfo, sleeper, tokenManager); + InstanceIdentity instanceIdentity = + new InstanceIdentity(factory, membership, config, instanceInfo, tokenRetriever); + Truth.assertThat(Strings.isNullOrEmpty(instanceIdentity.getReplacedIp())).isTrue(); + Truth.assertThat(instanceIdentity.isReplace()).isFalse(); + } + + private List newPriamInstances() { + List instances = new ArrayList<>(); + + instances.addAll(newPriamInstances("eu-west", "1a", 0, "127.3.1.%d")); + instances.addAll(newPriamInstances("eu-west", "1b", 3, "127.3.2.%d")); + instances.addAll(newPriamInstances("eu-west", "1c", 6, "127.3.3.%d")); + + instances.addAll(newPriamInstances("us-east", "1c", 1, "127.1.3.%d")); + instances.addAll(newPriamInstances("us-east", "1a", 4, "127.1.1.%d")); + instances.addAll(newPriamInstances("us-east", "1b", 7, "127.1.2.%d")); + + instances.addAll(newPriamInstances("us-west-2", "2a", 2, "127.2.1.%d")); + instances.addAll(newPriamInstances("us-west-2", "2b", 5, "127.2.2.%d")); + instances.addAll(newPriamInstances("us-west-2", "2c", 8, "127.2.3.%d")); + + return instances; + } + + private List newPriamInstances( + String dc, String rack, int seqNo, String ipRanges) { + return IntStream.range(0, 3) + .map(e -> seqNo + (e * 9)) + .mapToObj( + e -> + newMockPriamInstance( + dc, + rack, + e, + String.format("fakeInstance-%d", e), + String.format(ipRanges, e), + String.format("fakeHost-%d", e), + Integer.toString(e))) + .collect(Collectors.toList()); + } + + private PriamInstance newMockPriamInstance( + String dc, + String rack, + int id, + String instanceId, + String hostIp, + String hostName, + String token) { + PriamInstance priamInstance = new PriamInstance(); + priamInstance.setApp(APP); + priamInstance.setDC(dc); + priamInstance.setRac(rack); + priamInstance.setId(id); + priamInstance.setInstanceId(instanceId); + priamInstance.setHost(hostName); + priamInstance.setHostIP(hostIp); + priamInstance.setToken(token); + + return priamInstance; + } +} diff --git a/priam/src/test/java/com/netflix/priam/identity/token/TokenRetrieverTest.java b/priam/src/test/java/com/netflix/priam/identity/token/TokenRetrieverTest.java new file mode 100644 index 000000000..2d589d76f --- /dev/null +++ b/priam/src/test/java/com/netflix/priam/identity/token/TokenRetrieverTest.java @@ -0,0 +1,476 @@ +/* + * Copyright 2019 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.priam.identity.token; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Lists; +import com.google.common.truth.Truth; +import com.google.inject.Guice; +import com.google.inject.Injector; +import com.netflix.priam.backup.BRTestModule; +import com.netflix.priam.config.FakeConfiguration; +import com.netflix.priam.config.IConfiguration; +import com.netflix.priam.identity.IMembership; +import com.netflix.priam.identity.IPriamInstanceFactory; +import com.netflix.priam.identity.PriamInstance; +import com.netflix.priam.identity.config.InstanceInfo; +import com.netflix.priam.utils.FakeSleeper; +import com.netflix.priam.utils.SystemUtils; +import com.netflix.priam.utils.TokenManager; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import mockit.Expectations; +import mockit.Mocked; +import org.apache.commons.lang3.math.Fraction; +import org.codehaus.jettison.json.JSONObject; +import org.junit.Test; +import org.junit.jupiter.api.Assertions; + +/** Created by aagrawal on 3/1/19. */ +public class TokenRetrieverTest { + @Mocked private IMembership membership; + private IPriamInstanceFactory factory; + private InstanceInfo instanceInfo; + private IConfiguration configuration; + + private Map<String, String> tokenToEndpointMap = + IntStream.range(0, 6) + .boxed() + .collect( + Collectors.toMap(String::valueOf, e -> String.format("127.0.0.%s", e))); + private ImmutableList<String> liveInstances = ImmutableList.copyOf(tokenToEndpointMap.values()); + + public TokenRetrieverTest() { + Injector injector = Guice.createInjector(new BRTestModule()); + instanceInfo = injector.getInstance(InstanceInfo.class); + configuration = injector.getInstance(IConfiguration.class); + factory = injector.getInstance(IPriamInstanceFactory.class); + } + + @Test + public void testNoReplacementNormalScenario() throws Exception { + new Expectations() { + { + membership.getRacMembership(); + result = ImmutableSet.of(); + } + }; + PriamInstance priamInstance = getTokenRetriever().grabExistingToken(); + Truth.assertThat(priamInstance).isNull(); + } + + @Test + // There is no slot available for replacement as per Token Database. + public void testNoReplacementNoSpotAvailable() throws Exception { + List<PriamInstance> allInstances = getInstances(1); + Set<String> racMembership = getRacMembership(1); + racMembership.add(instanceInfo.getInstanceId()); + new Expectations() { + { + membership.getRacMembership(); + result = ImmutableSet.copyOf(racMembership); + } + }; + TokenRetriever tokenRetriever = getTokenRetriever(); + Truth.assertThat(tokenRetriever.grabExistingToken()).isNull(); + Truth.assertThat(tokenRetriever.getReplacedIp().isPresent()).isFalse(); + Truth.assertThat(factory.getAllIds(configuration.getAppName())) + .containsExactlyElementsIn(allInstances); + } + + @Test + // There is a potential slot for dead token but we are unable to replace. + public void testNoReplacementNoGossipMatch(@Mocked SystemUtils systemUtils) throws Exception { + getInstances(2); + Set<String> racMembership = getRacMembership(1); + racMembership.add(instanceInfo.getInstanceId()); + // gossip info returns null, thus unable to replace the instance. 
+ new Expectations() { + { + membership.getRacMembership(); + result = ImmutableSet.copyOf(racMembership); + SystemUtils.getDataFromUrl(anyString); + result = getStatus(liveInstances, tokenToEndpointMap); + times = 1; + } + }; + TokenRetriever tokenRetriever = getTokenRetriever(); + Truth.assertThat(tokenRetriever.grabExistingToken()).isNull(); + Truth.assertThat(tokenRetriever.getReplacedIp().isPresent()).isFalse(); + } + + @Test + // There is a potential slot for dead token but we are unable to replace. + public void testUsePregeneratedTokenWhenThereIsNoGossipMatchForDeadToken( + @Mocked SystemUtils systemUtils) throws Exception { + create(0, "iid_0", "host_0", "127.0.0.0", instanceInfo.getRac(), 0 + ""); + create(1, "new_slot", "host_1", "127.0.0.1", instanceInfo.getRac(), 1 + ""); + // gossip info returns null, thus unable to replace the instance. + new Expectations() { + { + membership.getRacMembership(); + result = ImmutableSet.of(); + SystemUtils.getDataFromUrl(anyString); + result = getStatus(liveInstances, tokenToEndpointMap); + times = 1; + } + }; + TokenRetriever tokenRetriever = getTokenRetriever(); + PriamInstance instance = tokenRetriever.grabExistingToken(); + Truth.assertThat(instance).isNotNull(); + Truth.assertThat(instance.getId()).isEqualTo(1); + Truth.assertThat(tokenRetriever.getReplacedIp().isPresent()).isFalse(); + } + + @Test + public void testReplacementGossipMatch(@Mocked SystemUtils systemUtils) throws Exception { + getInstances(6); + Set racMembership = getRacMembership(2); + racMembership.add(instanceInfo.getInstanceId()); + + List myliveInstances = + liveInstances + .stream() + .filter(x -> !x.equalsIgnoreCase("127.0.0.3")) + .collect(Collectors.toList()); + String gossipResponse = getStatus(myliveInstances, tokenToEndpointMap); + + new Expectations() { + { + membership.getRacMembership(); + result = ImmutableSet.copyOf(racMembership); + SystemUtils.getDataFromUrl(anyString); + returns(gossipResponse, gossipResponse, null, "random_value", gossipResponse); + } + }; + TokenRetriever tokenRetriever = getTokenRetriever(); + Truth.assertThat(tokenRetriever.grabExistingToken()).isNotNull(); + Truth.assertThat(tokenRetriever.getReplacedIp().isPresent()).isTrue(); + Truth.assertThat(tokenRetriever.getReplacedIp().get()).isEqualTo("127.0.0.3"); + } + + @Test + public void testPrioritizeDeadTokens(@Mocked SystemUtils systemUtils) throws Exception { + create(0, "iid_0", "host_0", "127.0.0.0", instanceInfo.getRac(), 0 + ""); + create(1, "new_slot", "host_1", "127.0.0.1", instanceInfo.getRac(), 1 + ""); + new Expectations() { + { + membership.getRacMembership(); + result = ImmutableSet.of(); + SystemUtils.getDataFromUrl(anyString); + returns(null, null); + } + }; + TokenRetriever tokenRetriever = getTokenRetriever(); + Truth.assertThat(tokenRetriever.grabExistingToken()).isNotNull(); + Truth.assertThat(tokenRetriever.getReplacedIp().isPresent()).isTrue(); + Truth.assertThat(tokenRetriever.getReplacedIp().get()).isEqualTo("127.0.0.0"); + } + + @Test + public void testPrioritizeDeadInstancesEvenIfAfterANewSlot(@Mocked SystemUtils systemUtils) + throws Exception { + create(0, "new_slot", "host_0", "127.0.0.0", instanceInfo.getRac(), 0 + ""); + create(1, "iid_1", "host_1", "127.0.0.1", instanceInfo.getRac(), 1 + ""); + new Expectations() { + { + membership.getRacMembership(); + result = ImmutableSet.of(); + SystemUtils.getDataFromUrl(anyString); + returns(null, null); + } + }; + TokenRetriever tokenRetriever = getTokenRetriever(); + 
Truth.assertThat(tokenRetriever.grabExistingToken()).isNotNull(); + Truth.assertThat(tokenRetriever.getReplacedIp().isPresent()).isTrue(); + Truth.assertThat(tokenRetriever.getReplacedIp().get()).isEqualTo("127.0.0.1"); + } + + @Test + public void testNewTokenFailureIfProhibited() { + ((FakeConfiguration) configuration).setCreateNewToken(false); + create(0, "iid_0", "host_0", "127.0.0.0", instanceInfo.getRac(), 0 + ""); + create(1, "iid_1", "host_1", "127.0.0.1", instanceInfo.getRac(), 1 + ""); + new Expectations() { + { + membership.getRacMembership(); + result = ImmutableSet.of("iid_0", "iid_1"); + } + }; + Assertions.assertThrows(IllegalStateException.class, () -> getTokenRetriever().get()); + } + + @Test + public void testNewTokenNoInstancesInRac() throws Exception { + create(0, "iid_0", "host_0", "127.0.0.0", "az2", 0 + ""); + create(1, "iid_1", "host_1", "127.0.0.1", "az2", 1 + ""); + new Expectations() { + { + membership.getRacMembership(); + result = ImmutableSet.of("iid_0", "iid_1"); + membership.getRacCount(); + result = 1; + membership.getRacMembershipSize(); + result = 3; + } + }; + PriamInstance instance = getTokenRetriever().get(); + Truth.assertThat(instance.getToken()).isEqualTo("1808575600"); + // region offset for us-east-1 + index of rac az1 (1808575600 + 0) + Truth.assertThat(instance.getId()).isEqualTo(1808575600); + } + + @Test + public void testNewTokenGenerationNoInstancesWithLargeEnoughId() throws Exception { + create(0, "iid_0", "host_0", "127.0.0.0", "az1", 0 + ""); + create(1, "iid_1", "host_1", "127.0.0.1", "az1", 1 + ""); + new Expectations() { + { + membership.getRacMembership(); + result = ImmutableSet.of("iid_0", "iid_1"); + membership.getRacCount(); + result = 1; + membership.getRacMembershipSize(); + result = 3; + } + }; + PriamInstance instance = getTokenRetriever().get(); + Truth.assertThat(instance.getToken()).isEqualTo("170141183460469231731687303717692681326"); + // region offset for us-east-1 + number of racs in cluster (3) + Truth.assertThat(instance.getId()).isEqualTo(1808575603); + } + + @Test + public void testNewTokenFailureWhenMyRacIsNotInCluster() { + ((FakeConfiguration) configuration).setRacs("az2", "az3"); + create(0, "iid_0", "host_0", "127.0.0.0", "az2", 0 + ""); + create(1, "iid_1", "host_1", "127.0.0.1", "az2", 1 + ""); + new Expectations() { + { + membership.getRacMembership(); + result = ImmutableSet.of("iid_0", "iid_1"); + } + }; + Assertions.assertThrows(IllegalStateException.class, () -> getTokenRetriever().get()); + } + + @Test + public void testNewTokenGenerationMultipleInstancesWithLargetEnoughIds() throws Exception { + create(2000000000, "iid_0", "host_0", "127.0.0.0", "az1", 0 + ""); + create(2000000001, "iid_1", "host_1", "127.0.0.1", "az1", 1 + ""); + new Expectations() { + { + membership.getRacMembership(); + result = ImmutableSet.of("iid_0", "iid_1"); + membership.getRacCount(); + result = 1; + membership.getRacMembershipSize(); + result = 3; + } + }; + PriamInstance instance = getTokenRetriever().get(); + Truth.assertThat(instance.getToken()) + .isEqualTo("10856391546591660081525376676060033425699421368"); + // max id (2000000001) + total instances (3) + Truth.assertThat(instance.getId()).isEqualTo(2000000004); + } + + @Test + public void testPreassignedTokenNotReplacedIfPublicIPMatch(@Mocked SystemUtils systemUtils) + throws Exception { + // IP in DB doesn't matter so we make it different to confirm that + create(0, instanceInfo.getInstanceId(), "host_0", "1.2.3.4", "az1", 0 + ""); + getInstances(5); + String 
gossipResponse = getStatus(liveInstances, tokenToEndpointMap); + + new Expectations() { + { + SystemUtils.getDataFromUrl(anyString); + returns(gossipResponse, gossipResponse, null, "random_value", gossipResponse); + } + }; + TokenRetriever tokenRetriever = getTokenRetriever(); + tokenRetriever.get(); + Truth.assertThat(tokenRetriever.getReplacedIp().isPresent()).isFalse(); + } + + @Test + public void testPreassignedTokenNotReplacedIfPrivateIPMatch(@Mocked SystemUtils systemUtils) + throws Exception { + // IP in DB doesn't matter so we make it different to confirm that + create(0, instanceInfo.getInstanceId(), "host_0", "1.2.3.4", "az1", 0 + ""); + getInstances(5); + Map myTokenToEndpointMap = + IntStream.range(0, 7) + .boxed() + .collect( + Collectors.toMap( + String::valueOf, e -> String.format("127.1.1.%s", e))); + ImmutableList myLiveInstances = ImmutableList.copyOf(tokenToEndpointMap.values()); + String gossipResponse = getStatus(myLiveInstances, myTokenToEndpointMap); + + new Expectations() { + { + SystemUtils.getDataFromUrl(anyString); + returns(gossipResponse, gossipResponse, null, "random_value", gossipResponse); + } + }; + TokenRetriever tokenRetriever = getTokenRetriever(); + tokenRetriever.get(); + Truth.assertThat(tokenRetriever.getReplacedIp().isPresent()).isFalse(); + } + + @Test + public void testGetPreassignedTokenThrowsIfOwnerIPIsLive(@Mocked SystemUtils systemUtils) + throws Exception { + getInstances(5); + create(6, instanceInfo.getInstanceId(), "host_5", "1.2.3.4", "az1", 6 + ""); + Map myTokenToEndpointMap = + IntStream.range(0, 7) + .boxed() + .collect( + Collectors.toMap( + String::valueOf, e -> String.format("18.221.0.%s", e))); + ImmutableList myLiveInstances = ImmutableList.copyOf(myTokenToEndpointMap.values()); + String gossipResponse = getStatus(myLiveInstances, myTokenToEndpointMap); + + new Expectations() { + { + SystemUtils.getDataFromUrl(anyString); + returns(gossipResponse, gossipResponse, null, "random_value", gossipResponse); + } + }; + Assertions.assertThrows( + TokenRetrieverUtils.GossipParseException.class, () -> getTokenRetriever().get()); + } + + @Test + public void testGetPreassignedTokenReplacesIfOwnerIPIsNotLive(@Mocked SystemUtils systemUtils) + throws Exception { + getInstances(5); + create(6, instanceInfo.getInstanceId(), "host_0", "1.2.3.4", "az1", 6 + ""); + Map myTokenToEndpointMap = + IntStream.range(0, 7) + .boxed() + .collect( + Collectors.toMap( + String::valueOf, e -> String.format("18.221.0.%s", e))); + List myLiveInstances = + tokenToEndpointMap.values().stream().sorted().limit(6).collect(Collectors.toList()); + String gossipResponse = getStatus(myLiveInstances, myTokenToEndpointMap); + + new Expectations() { + { + SystemUtils.getDataFromUrl(anyString); + returns(gossipResponse, gossipResponse, null, "random_value", gossipResponse); + } + }; + TokenRetriever tokenRetriever = getTokenRetriever(); + tokenRetriever.get(); + Truth.assertThat(tokenRetriever.getReplacedIp().isPresent()).isTrue(); + } + + @Test + public void testIPIsUpdatedWhenGrabbingPreassignedToken(@Mocked SystemUtils systemUtils) + throws Exception { + create(0, instanceInfo.getInstanceId(), "host_0", "1.2.3.4", "az1", 0 + ""); + Truth.assertThat(getTokenRetriever().get().getHostIP()).isEqualTo("127.0.0.0"); + } + + @Test + public void testRingPositionFirst(@Mocked SystemUtils systemUtils) throws Exception { + getInstances(6); + create(0, instanceInfo.getInstanceId(), "host_0", "1.2.3.4", "az1", 0 + ""); + TokenRetriever tokenRetriever = getTokenRetriever(); + 
tokenRetriever.get(); + Truth.assertThat(tokenRetriever.getRingPosition()).isEqualTo(Fraction.getFraction(0, 7)); + } + + @Test + public void testRingPositionMiddle(@Mocked SystemUtils systemUtils) throws Exception { + getInstances(3); + create(4, instanceInfo.getInstanceId(), "host_0", "1.2.3.4", "az1", 4 + ""); + createByIndex(5); + createByIndex(6); + TokenRetriever tokenRetriever = getTokenRetriever(); + tokenRetriever.get(); + Truth.assertThat(tokenRetriever.getRingPosition()).isEqualTo(Fraction.getFraction(3, 6)); + } + + @Test + public void testRingPositionLast(@Mocked SystemUtils systemUtils) throws Exception { + getInstances(6); + create(7, instanceInfo.getInstanceId(), "host_0", "1.2.3.4", "az1", 7 + ""); + TokenRetriever tokenRetriever = getTokenRetriever(); + tokenRetriever.get(); + Truth.assertThat(tokenRetriever.getRingPosition()).isEqualTo(Fraction.getFraction(6, 7)); + } + + private String getStatus(List<String> liveInstances, Map<String, String> tokenToEndpointMap) { + JSONObject jsonObject = new JSONObject(); + try { + jsonObject.put("live", liveInstances); + jsonObject.put("tokenToEndpointMap", tokenToEndpointMap); + } catch (Exception e) { + + } + return jsonObject.toString(); + } + + private List<PriamInstance> getInstances(int noOfInstances) { + List<PriamInstance> allInstances = Lists.newArrayList(); + for (int i = 1; i <= noOfInstances; i++) allInstances.add(createByIndex(i)); + return allInstances; + } + + private PriamInstance createByIndex(int index) { + return create( + index, + String.format("instance_id_%d", index), + String.format("hostname_%d", index), + String.format("127.0.0.%d", index), + instanceInfo.getRac(), + index + ""); + } + + private Set<String> getRacMembership(int noOfInstances) { + return IntStream.range(1, noOfInstances + 1) + .mapToObj(i -> String.format("instance_id_%d", i)) + .collect(Collectors.toSet()); + } + + private PriamInstance create( + int id, String instanceID, String hostname, String ip, String rac, String payload) { + return factory.create( + configuration.getAppName(), id, instanceID, hostname, ip, rac, null, payload); + } + + private TokenRetriever getTokenRetriever() { + return new TokenRetriever( + factory, + membership, + configuration, + instanceInfo, + new FakeSleeper(), + new TokenManager(configuration)); + } +} diff --git a/priam/src/test/java/com/netflix/priam/identity/token/TokenRetrieverUtilsTest.java b/priam/src/test/java/com/netflix/priam/identity/token/TokenRetrieverUtilsTest.java new file mode 100644 index 000000000..dbb00003f --- /dev/null +++ b/priam/src/test/java/com/netflix/priam/identity/token/TokenRetrieverUtilsTest.java @@ -0,0 +1,226 @@ +package com.netflix.priam.identity.token; + +import static org.hamcrest.core.AllOf.allOf; +import static org.hamcrest.core.IsNot.not; + +import com.google.common.collect.ImmutableSet; +import com.netflix.priam.identity.PriamInstance; +import com.netflix.priam.utils.SystemUtils; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import mockit.Expectations; +import mockit.Mocked; +import org.codehaus.jettison.json.JSONObject; +import org.junit.Assert; +import org.junit.Test; + +public class TokenRetrieverUtilsTest { + private static final String APP = "testapp"; + private static final String STATUS_URL_FORMAT = "http://%s:8080/Priam/REST/v1/cassadmin/status"; + + private ImmutableSet<PriamInstance> instances = + ImmutableSet.copyOf( + IntStream.range(0, 6) + .mapToObj( + e -> + newMockPriamInstance( + APP, 
"us-east", + (e < 3) ? "az1" : "az2", + e, + String.format("fakeInstance-%d", e), + String.format("127.0.0.%d", e), + String.format("fakeHost-%d", e), + String.valueOf(e))) + .collect(Collectors.toList())); + + private Map<String, String> tokenToEndpointMap = + IntStream.range(0, 6) + .mapToObj(e -> Integer.valueOf(e)) + .collect( + Collectors.toMap( + e -> String.valueOf(e), e -> String.format("127.0.0.%s", e))); + private List<String> liveInstances = + IntStream.range(0, 6) + .mapToObj(e -> String.format("127.0.0.%d", e)) + .collect(Collectors.toList()); + + @Test + public void testRetrieveTokenOwnerWhenGossipAgrees(@Mocked SystemUtils systemUtils) { + // mark previous instance with tokenNumber 4 as down in gossip. + List<String> myliveInstances = + liveInstances + .stream() + .filter(x -> !x.equalsIgnoreCase("127.0.0.4")) + .collect(Collectors.toList()); + + new Expectations() { + { + SystemUtils.getDataFromUrl(anyString); + result = getStatus(myliveInstances, tokenToEndpointMap); + } + }; + + TokenRetrieverUtils.InferredTokenOwnership inferredTokenOwnership = + TokenRetrieverUtils.inferTokenOwnerFromGossip(instances, "4", "us-east"); + Assert.assertEquals( + "127.0.0.4", inferredTokenOwnership.getTokenInformation().getIpAddress()); + } + + @Test + public void testRetrieveTokenOwnerWhenGossipDisagrees(@Mocked SystemUtils systemUtils) { + + List<String> myliveInstances = + liveInstances + .stream() + .filter(x -> !x.equalsIgnoreCase("127.0.0.4")) + .collect(Collectors.toList()); + + new Expectations() { + { + SystemUtils.getDataFromUrl( + withArgThat( + allOf( + not(String.format(STATUS_URL_FORMAT, "127.0.0.0")), + not(String.format(STATUS_URL_FORMAT, "127.0.0.2")), + not(String.format(STATUS_URL_FORMAT, "127.0.0.5"))))); + result = getStatus(myliveInstances, tokenToEndpointMap); + minTimes = 0; + + SystemUtils.getDataFromUrl(String.format(STATUS_URL_FORMAT, "127.0.0.0")); + result = getStatus(liveInstances, tokenToEndpointMap); + minTimes = 0; + + SystemUtils.getDataFromUrl(String.format(STATUS_URL_FORMAT, "127.0.0.2")); + result = getStatus(liveInstances, tokenToEndpointMap); + minTimes = 0; + + SystemUtils.getDataFromUrl(String.format(STATUS_URL_FORMAT, "127.0.0.5")); + result = null; + minTimes = 0; + } + }; + + TokenRetrieverUtils.InferredTokenOwnership inferredTokenOwnership = + TokenRetrieverUtils.inferTokenOwnerFromGossip(instances, "4", "us-east"); + Assert.assertEquals( + TokenRetrieverUtils.InferredTokenOwnership.TokenInformationStatus.MISMATCH, + inferredTokenOwnership.getTokenInformationStatus()); + Assert.assertTrue(inferredTokenOwnership.getTokenInformation().isLive()); + } + + @Test + public void testRetrieveTokenOwnerWhenGossipDisagrees_2Nodes(@Mocked SystemUtils systemUtils) { + ImmutableSet<PriamInstance> myInstances = + ImmutableSet.copyOf(instances.stream().limit(3).collect(Collectors.toList())); + List<String> myLiveInstances = liveInstances.stream().limit(3).collect(Collectors.toList()); + Map<String, String> myTokenToEndpointMap = + IntStream.range(0, 3) + .mapToObj(String::valueOf) + .collect( + Collectors.toMap( + Function.identity(), (i) -> tokenToEndpointMap.get(i))); + Map<String, String> alteredMap = new HashMap<>(myTokenToEndpointMap); + alteredMap.put("1", "1.2.3.4"); + + new Expectations() { + { + SystemUtils.getDataFromUrl(String.format(STATUS_URL_FORMAT, "127.0.0.0")); + result = getStatus(myLiveInstances, myTokenToEndpointMap); + minTimes = 0; + + SystemUtils.getDataFromUrl(String.format(STATUS_URL_FORMAT, "127.0.0.2")); + result = getStatus(myLiveInstances, alteredMap); + minTimes = 0; + } + }; + + TokenRetrieverUtils.InferredTokenOwnership 
inferredTokenOwnership = + TokenRetrieverUtils.inferTokenOwnerFromGossip(myInstances, "1", "us-east"); + Assert.assertEquals( + TokenRetrieverUtils.InferredTokenOwnership.TokenInformationStatus.MISMATCH, + inferredTokenOwnership.getTokenInformationStatus()); + Assert.assertTrue(inferredTokenOwnership.getTokenInformation().isLive()); + } + + @Test + public void testRetrieveTokenOwnerWhenAllHostsInGossipReturnsNull( + @Mocked SystemUtils systemUtils) throws Exception { + new Expectations() { + { + SystemUtils.getDataFromUrl(anyString); + result = getStatus(liveInstances, tokenToEndpointMap); + } + }; + + TokenRetrieverUtils.InferredTokenOwnership inferredTokenOwnership = + TokenRetrieverUtils.inferTokenOwnerFromGossip(instances, "4", "us-east"); + Assert.assertEquals( + TokenRetrieverUtils.InferredTokenOwnership.TokenInformationStatus.GOOD, + inferredTokenOwnership.getTokenInformationStatus()); + Assert.assertTrue(inferredTokenOwnership.getTokenInformation().isLive()); + } + + @Test + public void testRetrieveTokenOwnerWhenAllInstancesThrowGossipParseException( + @Mocked SystemUtils systemUtils) { + + new Expectations() { + { + SystemUtils.getDataFromUrl(anyString); + result = new TokenRetrieverUtils.GossipParseException("Test"); + } + }; + + TokenRetrieverUtils.InferredTokenOwnership inferredTokenOwnership = + TokenRetrieverUtils.inferTokenOwnerFromGossip(instances, "4", "us-east"); + Assert.assertEquals( + TokenRetrieverUtils.InferredTokenOwnership.TokenInformationStatus.UNREACHABLE_NODES, + inferredTokenOwnership.getTokenInformationStatus()); + Assert.assertNull(inferredTokenOwnership.getTokenInformation()); + } + + private String newGossipRecord( + int tokenNumber, String ip, String dc, String rack, String status) { + return String.format( + "{\"TOKENS\":\"[%d]\",\"PUBLIC_IP\":\"%s\",\"RACK\":\"%s\",\"STATUS\":\"%s\",\"DC\":\"%s\"}", + tokenNumber, ip, dc, status, rack); + } + + private String getStatus(List<String> liveInstances, Map<String, String> tokenToEndpointMap) { + JSONObject jsonObject = new JSONObject(); + try { + jsonObject.put("live", liveInstances); + jsonObject.put("tokenToEndpointMap", tokenToEndpointMap); + } catch (Exception e) { + + } + return jsonObject.toString(); + } + + private PriamInstance newMockPriamInstance( + String app, + String dc, + String rack, + int id, + String instanceId, + String hostIp, + String hostName, + String token) { + PriamInstance priamInstance = new PriamInstance(); + priamInstance.setApp(app); + priamInstance.setDC(dc); + priamInstance.setRac(rack); + priamInstance.setId(id); + priamInstance.setInstanceId(instanceId); + priamInstance.setHost(hostName); + priamInstance.setHostIP(hostIp); + priamInstance.setToken(token); + + return priamInstance; + } +} diff --git a/priam/src/test/java/com/netflix/priam/notification/TestBackupNotificationMgr.java b/priam/src/test/java/com/netflix/priam/notification/TestBackupNotificationMgr.java new file mode 100644 index 000000000..7fa6d6bb2 --- /dev/null +++ b/priam/src/test/java/com/netflix/priam/notification/TestBackupNotificationMgr.java @@ -0,0 +1,324 @@ +package com.netflix.priam.notification; + +import com.amazonaws.services.sns.model.MessageAttributeValue; +import com.google.inject.Guice; +import com.google.inject.Injector; +import com.google.inject.Provider; +import com.netflix.priam.backup.AbstractBackupPath; +import com.netflix.priam.backup.BRTestModule; +import com.netflix.priam.backup.BackupVerificationResult; +import com.netflix.priam.config.IBackupRestoreConfig; +import com.netflix.priam.config.IConfiguration; +import 
java.nio.file.Path; +import java.nio.file.Paths; +import java.text.ParseException; +import java.time.Instant; +import java.util.Map; +import mockit.Capturing; +import mockit.Expectations; +import mockit.Mocked; +import mockit.Verifications; +import org.junit.Before; +import org.junit.Test; + +public class TestBackupNotificationMgr { + private Injector injector; + private BackupNotificationMgr backupNotificationMgr; + private Provider abstractBackupPathProvider; + private IConfiguration configuration; + + @Before + public void setUp() { + if (injector == null) { + injector = Guice.createInjector(new BRTestModule()); + } + + if (backupNotificationMgr == null) { + backupNotificationMgr = injector.getInstance(BackupNotificationMgr.class); + } + + if (abstractBackupPathProvider == null) { + abstractBackupPathProvider = injector.getProvider(AbstractBackupPath.class); + } + } + + @Test + public void testNotificationNonEmptyFilter( + @Mocked IBackupRestoreConfig backupRestoreConfig, + @Capturing INotificationService notificationService) + throws ParseException { + new Expectations() { + { + backupRestoreConfig.getBackupNotifyComponentIncludeList(); + result = "SNAPSHOT_VERIFIED, META_V2"; + maxTimes = 2; + } + }; + new Expectations() { + { + notificationService.notify(anyString, (Map) any); + maxTimes = 1; + } + }; + Path path = + Paths.get( + "fakeDataLocation", + "fakeKeyspace", + "fakeColumnFamily", + "fakeBackup", + "fakeData.db"); + AbstractBackupPath abstractBackupPath = abstractBackupPathProvider.get(); + abstractBackupPath.parseLocal(path.toFile(), AbstractBackupPath.BackupFileType.META_V2); + BackupEvent backupEvent = new BackupEvent(abstractBackupPath); + backupNotificationMgr.updateEventStart(backupEvent); + new Verifications() { + { + backupRestoreConfig.getBackupNotifyComponentIncludeList(); + maxTimes = 2; + } + + { + notificationService.notify(anyString, (Map) any); + maxTimes = 1; + } + }; + } + + @Test + public void testNoNotificationsNonEmptyFilter( + @Mocked IBackupRestoreConfig backupRestoreConfig, + @Capturing INotificationService notificationService) + throws ParseException { + new Expectations() { + { + backupRestoreConfig.getBackupNotifyComponentIncludeList(); + result = "META_V2"; + maxTimes = 2; + } + }; + new Expectations() { + { + notificationService.notify(anyString, (Map) any); + maxTimes = 0; + } + }; + Path path = + Paths.get( + "fakeDataLocation", + "fakeKeyspace", + "fakeColumnFamily", + "fakeBackup", + "fakeData.db"); + AbstractBackupPath abstractBackupPath = abstractBackupPathProvider.get(); + abstractBackupPath.parseLocal(path.toFile(), AbstractBackupPath.BackupFileType.SST); + BackupEvent backupEvent = new BackupEvent(abstractBackupPath); + backupNotificationMgr.updateEventStart(backupEvent); + new Verifications() { + { + backupRestoreConfig.getBackupNotifyComponentIncludeList(); + maxTimes = 2; + } + + { + notificationService.notify(anyString, (Map) any); + maxTimes = 0; + } + }; + } + + @Test + public void testNotificationsEmptyFilter( + @Mocked IBackupRestoreConfig backupRestoreConfig, + @Capturing INotificationService notificationService) + throws ParseException { + new Expectations() { + { + backupRestoreConfig.getBackupNotifyComponentIncludeList(); + result = ""; + maxTimes = 1; + } + }; + new Expectations() { + { + notificationService.notify(anyString, (Map) any); + maxTimes = 1; + } + }; + Path path = + Paths.get( + "fakeDataLocation", + "fakeKeyspace", + "fakeColumnFamily", + "fakeBackup", + "fakeData.db"); + AbstractBackupPath abstractBackupPath = 
abstractBackupPathProvider.get(); + abstractBackupPath.parseLocal(path.toFile(), AbstractBackupPath.BackupFileType.SST); + BackupEvent backupEvent = new BackupEvent(abstractBackupPath); + backupNotificationMgr.updateEventStart(backupEvent); + new Verifications() { + { + backupRestoreConfig.getBackupNotifyComponentIncludeList(); + maxTimes = 1; + } + + { + notificationService.notify(anyString, (Map) any); + maxTimes = 1; + } + }; + } + + @Test + public void testNotificationsInvalidFilter( + @Mocked IBackupRestoreConfig backupRestoreConfig, + @Capturing INotificationService notificationService) + throws ParseException { + new Expectations() { + { + backupRestoreConfig.getBackupNotifyComponentIncludeList(); + result = "SOME_FAKE_FILE_TYPE_1, SOME_FAKE_FILE_TYPE_2"; + maxTimes = 2; + } + }; + new Expectations() { + { + notificationService.notify(anyString, (Map) any); + maxTimes = 1; + } + }; + Path path = + Paths.get( + "fakeDataLocation", + "fakeKeyspace", + "fakeColumnFamily", + "fakeBackup", + "fakeData.db"); + AbstractBackupPath abstractBackupPath = abstractBackupPathProvider.get(); + abstractBackupPath.parseLocal(path.toFile(), AbstractBackupPath.BackupFileType.SST); + BackupEvent backupEvent = new BackupEvent(abstractBackupPath); + backupNotificationMgr.updateEventStart(backupEvent); + new Verifications() { + { + backupRestoreConfig.getBackupNotifyComponentIncludeList(); + maxTimes = 2; + } + + { + notificationService.notify(anyString, (Map) any); + maxTimes = 1; + } + }; + } + + @Test + public void testNotificationsPartiallyValidFilter( + @Mocked IBackupRestoreConfig backupRestoreConfig, + @Capturing INotificationService notificationService) + throws ParseException { + new Expectations() { + { + backupRestoreConfig.getBackupNotifyComponentIncludeList(); + result = "SOME_FAKE_FILE_TYPE_1, SOME_FAKE_FILE_TYPE_2, META_V2"; + maxTimes = 2; + } + }; + new Expectations() { + { + notificationService.notify(anyString, (Map) any); + maxTimes = 1; + } + }; + Path path = + Paths.get( + "fakeDataLocation", + "fakeKeyspace", + "fakeColumnFamily", + "fakeBackup", + "fakeData.db"); + AbstractBackupPath abstractBackupPath = abstractBackupPathProvider.get(); + abstractBackupPath.parseLocal(path.toFile(), AbstractBackupPath.BackupFileType.META_V2); + BackupEvent backupEvent = new BackupEvent(abstractBackupPath); + backupNotificationMgr.updateEventStart(backupEvent); + new Verifications() { + { + backupRestoreConfig.getBackupNotifyComponentIncludeList(); + maxTimes = 2; + } + + { + notificationService.notify(anyString, (Map) any); + maxTimes = 1; + } + }; + } + + @Test + public void testNoNotificationsPartiallyValidFilter( + @Mocked IBackupRestoreConfig backupRestoreConfig, + @Capturing INotificationService notificationService) + throws ParseException { + new Expectations() { + { + backupRestoreConfig.getBackupNotifyComponentIncludeList(); + result = "SOME_FAKE_FILE_TYPE_1, SOME_FAKE_FILE_TYPE_2, SST"; + maxTimes = 2; + } + }; + new Expectations() { + { + notificationService.notify(anyString, (Map) any); + maxTimes = 0; + } + }; + Path path = + Paths.get( + "fakeDataLocation", + "fakeKeyspace", + "fakeColumnFamily", + "fakeBackup", + "fakeData.db"); + AbstractBackupPath abstractBackupPath = abstractBackupPathProvider.get(); + abstractBackupPath.parseLocal(path.toFile(), AbstractBackupPath.BackupFileType.META_V2); + BackupEvent backupEvent = new BackupEvent(abstractBackupPath); + backupNotificationMgr.updateEventStart(backupEvent); + new Verifications() { + { + 
backupRestoreConfig.getBackupNotifyComponentIncludeList(); + maxTimes = 2; + } + + { + notificationService.notify(anyString, (Map) any); + maxTimes = 0; + } + }; + } + + @Test + public void testNotify(@Capturing INotificationService notificationService) { + new Expectations() { + { + notificationService.notify(anyString, (Map) any); + maxTimes = 1; + } + }; + BackupVerificationResult backupVerificationResult = getBackupVerificationResult(); + backupNotificationMgr.notify(backupVerificationResult); + new Verifications() { + { + notificationService.notify(anyString, (Map) any); + maxTimes = 1; + } + }; + } + + private static BackupVerificationResult getBackupVerificationResult() { + BackupVerificationResult result = new BackupVerificationResult(); + result.valid = true; + result.manifestAvailable = true; + result.remotePath = "some_random"; + result.filesMatched = 123; + result.snapshotInstant = Instant.EPOCH; + return result; + } +} diff --git a/priam/src/test/java/com/netflix/priam/resources/BackupServletTest.java b/priam/src/test/java/com/netflix/priam/resources/BackupServletTest.java index 8fb87d5e8..ce1ae0fb5 100644 --- a/priam/src/test/java/com/netflix/priam/resources/BackupServletTest.java +++ b/priam/src/test/java/com/netflix/priam/resources/BackupServletTest.java @@ -17,23 +17,19 @@ package com.netflix.priam.resources; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.Lists; +import static org.junit.Assert.assertEquals; + import com.google.inject.Guice; import com.google.inject.Injector; -import com.google.inject.Provider; -import com.netflix.priam.defaultimpl.ICassandraProcess; -import com.netflix.priam.config.IConfiguration; -import com.netflix.priam.PriamServer; import com.netflix.priam.backup.*; +import com.netflix.priam.config.IConfiguration; import com.netflix.priam.health.InstanceState; -import com.netflix.priam.identity.IPriamInstanceFactory; -import com.netflix.priam.identity.InstanceIdentity; -import com.netflix.priam.identity.PriamInstance; +import com.netflix.priam.identity.config.InstanceInfo; import com.netflix.priam.restore.Restore; -import com.netflix.priam.tuner.ICassandraTuner; -import com.netflix.priam.utils.ITokenManager; -import com.netflix.priam.utils.TokenManager; +import com.netflix.priam.utils.DateUtil; +import java.time.Instant; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.Response; import mockit.Expectations; import mockit.Mocked; import mockit.integration.junit4.JMockit; @@ -42,372 +38,111 @@ import org.junit.Test; import org.junit.runner.RunWith; -import javax.ws.rs.core.MediaType; -import javax.ws.rs.core.Response; -import java.util.Date; -import java.util.List; - -import static org.junit.Assert.assertEquals; - @RunWith(JMockit.class) -public class BackupServletTest -{ - private @Mocked PriamServer priamServer; +public class BackupServletTest { private IConfiguration config; - private @Mocked IBackupFileSystem bkpFs; - private @Mocked IBackupFileSystem bkpStatusFs; private @Mocked Restore restoreObj; - private @Mocked Provider pathProvider; - private @Mocked - ICassandraTuner tuner; private @Mocked SnapshotBackup snapshotBackup; - private @Mocked IPriamInstanceFactory factory; - private @Mocked ICassandraProcess cassProcess; - private @Mocked BackupStatusMgr bkupStatusMgr; private BackupServlet resource; private RestoreServlet restoreResource; - private BackupVerification backupVerification; + private InstanceInfo instanceInfo; @Before - public void setUp() - { + public void setUp() { Injector injector = 
Guice.createInjector(new BRTestModule()); config = injector.getInstance(IConfiguration.class); InstanceState instanceState = injector.getInstance(InstanceState.class); - ITokenManager tokenManager = new TokenManager(config); - resource = new BackupServlet(priamServer, config, bkpFs, bkpStatusFs, restoreObj, pathProvider, - tuner, snapshotBackup, factory, tokenManager, cassProcess, bkupStatusMgr,backupVerification); - - restoreResource = new RestoreServlet(config, restoreObj, pathProvider,priamServer, factory, tuner, cassProcess - , tokenManager, instanceState); + instanceInfo = injector.getInstance(InstanceInfo.class); + resource = injector.getInstance(BackupServlet.class); + restoreResource = injector.getInstance(RestoreServlet.class); } @Test - public void backup() throws Exception - { - new Expectations() {{ - snapshotBackup.execute(); - }}; - - Response response = resource.backup(); - assertEquals(200, response.getStatus()); - assertEquals("[\"ok\"]", response.getEntity()); - assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getMetadata().get("Content-Type").get(0)); - } - - @Test - public void restore_minimal(@Mocked final InstanceIdentity identity, - @Mocked final PriamInstance instance) throws Exception - { - final String dateRange = null; - final String newRegion = null; - final String newToken = null; - final String keyspaces = null; - - final String oldRegion = "us-east-1"; - final String oldToken = "1234"; - + public void backup() throws Exception { new Expectations() { { - priamServer.getId(); result = identity; times = 2; + snapshotBackup.execute(); } }; - new Expectations() { - - { - config.getDC(); result = oldRegion; - identity.getInstance(); result = instance; times = 2; - instance.getToken(); result = oldToken; - - config.isRestoreClosestToken(); result = false; - - restoreObj.restore((Date) any, (Date) any); // TODO: test default value - - config.setDC(oldRegion); - instance.setToken(oldToken); - tuner.updateAutoBootstrap(config.getYamlLocation(), false); - } - }; - - expectCassandraStartup(); - - Response response = restoreResource.restore(dateRange, newRegion, newToken, keyspaces, null); - assertEquals(200, response.getStatus()); - assertEquals("[\"ok\"]", response.getEntity()); - assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getMetadata().get("Content-Type").get(0)); - } - - @Test - public void restore_withDateRange(@Mocked final InstanceIdentity identity, - @Mocked final PriamInstance instance, @Mocked final AbstractBackupPath backupPath) throws Exception - { - final String dateRange = "201101010000,20111231259"; - final String newRegion = null; - final String newToken = null; - final String keyspaces = null; - - final String oldRegion = "us-east-1"; - final String oldToken = "1234"; - - new Expectations() { - { - priamServer.getId(); result = identity; times = 2; - } - }; - new Expectations() { - - { - pathProvider.get(); result = backupPath; - backupPath.parseDate(dateRange.split(",")[0]); result = new DateTime(2011, 01, 01, 00, 00).toDate(); times = 1; - backupPath.parseDate(dateRange.split(",")[1]); result = new DateTime(2011, 12, 31, 23, 59).toDate(); times = 1; - -// config.getDC(); result = oldRegion; - identity.getInstance(); result = instance; times = 2; - instance.getToken(); result = oldToken; - - // config.isRestoreClosestToken(); result = false; - - restoreObj.restore( - new DateTime(2011, 01, 01, 00, 00).toDate(), - new DateTime(2011, 12, 31, 23, 59).toDate()); - - // config.setDC(oldRegion); - instance.setToken(oldToken); - 
tuner.updateAutoBootstrap(config.getYamlLocation(), false); - } - }; - - expectCassandraStartup(); - - Response response = restoreResource.restore(dateRange, newRegion, newToken, keyspaces, null); + Response response = resource.backup(); assertEquals(200, response.getStatus()); assertEquals("[\"ok\"]", response.getEntity()); - assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getMetadata().get("Content-Type").get(0)); + assertEquals( + MediaType.APPLICATION_JSON_TYPE, response.getMetadata().get("Content-Type").get(0)); } -// @Test -// public void restore_withRegion() throws Exception -// { -// final String dateRange = null; -// final String newRegion = "us-west-1"; -// final String newToken = null; -// final String keyspaces = null; -// -// final String oldRegion = "us-east-1"; -// final String oldToken = "1234"; -// final String appName = "myApp"; -// -// new Expectations() { -// @NonStrict InstanceIdentity identity; -// PriamInstance instance; -// @NonStrict PriamInstance instance1, instance2, instance3; -// -// { -// config.getDC(); result = oldRegion; -// priamServer.getId(); result = identity; times = 3; -// identity.getInstance(); result = instance; times = 3; -// instance.getToken(); result = oldToken; -// -// config.isRestoreClosestToken(); result = false; -// -// config.setDC(newRegion); -// instance.getToken(); result = oldToken; -// config.getAppName(); result = appName; -// factory.getAllIds(appName); result = ImmutableList.of(instance, instance1, instance2, instance3); -// instance.getDC(); result = oldRegion; -// instance.getToken(); result = oldToken; -// instance1.getDC(); result = oldRegion; -// instance2.getDC(); result = oldRegion; -// instance3.getDC(); result = oldRegion; -// instance1.getToken(); result = "1234"; -// instance2.getToken(); result = "5678"; -// instance3.getToken(); result = "9000"; -// instance.setToken((String) any); // TODO: test mocked closest token -// -// restoreObj.restore((Date) any, (Date) any); // TODO: test default value -// -// config.setDC(oldRegion); -// instance.setToken(oldToken); -// tuneCassandra.writeAllProperties(false); -// } -// }; -// -// expectCassandraStartup(); -// -// Response response = resource.restore(dateRange, newRegion, newToken, keyspaces); -// assertEquals(200, response.getStatus()); -// assertEquals("[\"ok\"]", response.getEntity()); -// assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getMetadata().get("Content-Type").get(0)); -// } - @Test - public void restore_withToken(@Mocked final InstanceIdentity identity, - @Mocked final PriamInstance instance) throws Exception - { + public void restore_minimal() throws Exception { final String dateRange = null; - final String newRegion = null; - final String newToken = "myNewToken"; - final String keyspaces = null; - final String oldRegion = "us-east-1"; - final String oldToken = "1234"; - - new Expectations() { - { - priamServer.getId(); result = identity; times = 3; - } - }; new Expectations() { - { - config.getDC(); result = oldRegion; - identity.getInstance(); result = instance; times = 3; - instance.getToken(); result = oldToken; - instance.setToken(newToken); + instanceInfo.getRegion(); + result = oldRegion; - //config.isRestoreClosestToken(); result = false; - - restoreObj.restore((Date) any, (Date) any); // TODO: test default value - - config.setDC(oldRegion); - instance.setToken(oldToken); - tuner.updateAutoBootstrap(config.getYamlLocation(), false); + restoreObj.restore(new DateUtil.DateRange((Instant) any, (Instant) any)); } }; expectCassandraStartup(); 
- Response response = restoreResource.restore(dateRange, newRegion, newToken, keyspaces, null); + Response response = restoreResource.restore(dateRange); assertEquals(200, response.getStatus()); assertEquals("[\"ok\"]", response.getEntity()); - assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getMetadata().get("Content-Type").get(0)); + assertEquals( + MediaType.APPLICATION_JSON_TYPE, response.getMetadata().get("Content-Type").get(0)); } @Test - public void restore_withKeyspaces(@Mocked final InstanceIdentity identity, - @Mocked final PriamInstance instance) throws Exception - { - final String dateRange = null; - final String newRegion = null; - final String newToken = null; - final String keyspaces = "keyspace1,keyspace2"; - - final String oldRegion = "us-east-1"; - final String oldToken = "1234"; - - new Expectations() { - { - config.getDC(); result = oldRegion; - config.isRestoreClosestToken(); result = false; - - List restoreKeyspaces = Lists.newArrayList(); - restoreKeyspaces.clear(); - restoreKeyspaces.addAll(ImmutableList.of("keyspace1", "keyspace2")); + public void restore_withDateRange() throws Exception { + final String dateRange = "201101010000,201112312359"; - config.getRestoreKeySpaces(); result = restoreKeyspaces; - config.setDC(oldRegion); - priamServer.getId(); result = identity; times = 2; - } - }; new Expectations() { { - identity.getInstance(); result = instance; times = 2; - instance.getToken(); result = oldToken; - - restoreObj.restore((Date) any, (Date) any); // TODO: test default value - - instance.setToken(oldToken); - tuner.updateAutoBootstrap(config.getYamlLocation(), false); + DateUtil.getDate(dateRange.split(",")[0]); + result = new DateTime(2011, 1, 1, 0, 0).toDate(); + times = 1; + DateUtil.getDate(dateRange.split(",")[1]); + result = new DateTime(2011, 12, 31, 23, 59).toDate(); + times = 1; + restoreObj.restore(new DateUtil.DateRange(dateRange)); } }; expectCassandraStartup(); - Response response = restoreResource.restore(dateRange, newRegion, newToken, keyspaces, null); + Response response = restoreResource.restore(dateRange); assertEquals(200, response.getStatus()); assertEquals("[\"ok\"]", response.getEntity()); - assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getMetadata().get("Content-Type").get(0)); + assertEquals( + MediaType.APPLICATION_JSON_TYPE, response.getMetadata().get("Content-Type").get(0)); } - // TODO: this should also set/test newRegion and keyspaces - @Test - public void restore_maximal(@Mocked final InstanceIdentity identity, - @Mocked final PriamInstance instance, @Mocked final PriamInstance instance1, - @Mocked final PriamInstance instance2, @Mocked final PriamInstance instance3, - @Mocked final AbstractBackupPath backupPath) throws Exception - { - final String dateRange = "201101010000,20111231259"; - final String newRegion = null; - final String newToken = "5678"; - final String keyspaces = null; - - final String oldRegion = "us-east-1"; - final String oldToken = "1234"; - final String appName = "myApp"; - - instance.setDC(oldRegion); - instance1.setDC(oldRegion); - instance2.setDC(oldRegion); - instance3.setDC(oldRegion); - instance.setToken(oldToken); - instance1.setToken("1234"); - instance2.setToken("5678"); - instance3.setToken("9000"); - + // TODO: create CassandraController interface and inject, instead of static util method + private void expectCassandraStartup() { new Expectations() { - { - pathProvider.get(); result = backupPath; - backupPath.parseDate(dateRange.split(",")[0]); result = new DateTime(2011, 01, 
01, 00, 00).toDate(); times = 1; - backupPath.parseDate(dateRange.split(",")[1]); result = new DateTime(2011, 12, 31, 23, 59).toDate(); times = 1; - -// identity.getInstance(); result = instance; times = 5; -// instance.getToken(); result = oldToken; -// instance.setToken(newToken); -// -// instance.getToken(); result = oldToken; -// factory.getAllIds(appName); result = ImmutableList.of(instance, instance1, instance2, instance3); -// instance.getDC(); result = oldRegion; -// instance.getToken(); result = oldToken; -// instance1.getDC(); result = oldRegion; -// instance2.getDC(); result = oldRegion; -// instance3.getDC(); result = oldRegion; -// instance1.getToken(); result = "1234"; -// instance2.getToken(); result = "5678"; -// instance3.getToken(); result = "9000"; -// instance.setToken((String) any); // TODO: test mocked closest token - - restoreObj.restore( - new DateTime(2011, 01, 01, 00, 00).toDate(), - new DateTime(2011, 12, 31, 23, 59).toDate()); - - // instance.setToken(oldToken); - tuner.updateAutoBootstrap(config.getYamlLocation(), false); + config.getCassStartupScript(); + result = "/usr/bin/false"; + config.getHeapNewSize(); + result = "2G"; + config.getHeapSize(); + result = "8G"; + config.getDataFileLocation(); + result = "/var/lib/cassandra/data"; + config.getCommitLogLocation(); + result = "/var/lib/cassandra/commitlog"; + config.getBackupLocation(); + result = "backup"; + config.getCacheLocation(); + result = "/var/lib/cassandra/saved_caches"; + config.getJmxPort(); + result = 7199; + config.getMaxDirectMemory(); + result = "50G"; } }; - - expectCassandraStartup(); - - Response response = restoreResource.restore(dateRange, newRegion, newToken, keyspaces, null); - assertEquals(200, response.getStatus()); - assertEquals("[\"ok\"]", response.getEntity()); - assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getMetadata().get("Content-Type").get(0)); - } - - // TODO: create CassandraController interface and inject, instead of static util method - private Expectations expectCassandraStartup() { - return new Expectations() {{ - config.getCassStartupScript(); result = "/usr/bin/false"; - config.getHeapNewSize(); result = "2G"; - config.getHeapSize(); result = "8G"; - config.getDataFileLocation(); result = "/var/lib/cassandra/data"; - config.getCommitLogLocation(); result = "/var/lib/cassandra/commitlog"; - config.getBackupLocation(); result = "backup"; - config.getCacheLocation(); result = "/var/lib/cassandra/saved_caches"; - config.getJmxPort(); result = 7199; - config.getMaxDirectMemory(); result = "50G"; - }}; } } diff --git a/priam/src/test/java/com/netflix/priam/resources/BackupServletV2Test.java b/priam/src/test/java/com/netflix/priam/resources/BackupServletV2Test.java new file mode 100644 index 000000000..c4729aa71 --- /dev/null +++ b/priam/src/test/java/com/netflix/priam/resources/BackupServletV2Test.java @@ -0,0 +1,358 @@ +package com.netflix.priam.resources; + +import static org.junit.Assert.assertEquals; + +import com.google.inject.Guice; +import com.google.inject.Injector; +import com.google.inject.Provider; +import com.netflix.priam.backup.*; +import com.netflix.priam.backupv2.MetaV2Proxy; +import com.netflix.priam.backupv2.SnapshotMetaTask; +import com.netflix.priam.config.IConfiguration; +import com.netflix.priam.identity.config.InstanceInfo; +import com.netflix.priam.restore.Restore; +import com.netflix.priam.utils.DateUtil; +import com.netflix.priam.utils.GsonJsonSerializer; +import java.nio.file.Path; +import java.nio.file.Paths; +import 
java.text.SimpleDateFormat; +import java.time.Instant; +import java.time.temporal.ChronoUnit; +import java.util.ArrayList; +import java.util.Date; +import java.util.List; +import java.util.Optional; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.Response; +import mockit.Expectations; +import mockit.Mocked; +import org.joda.time.DateTime; +import org.junit.Before; +import org.junit.Test; + +public class BackupServletV2Test { + private IConfiguration config; + private @Mocked Restore restoreObj; + private @Mocked SnapshotMetaTask snapshotBackup; + private @Mocked BackupVerification backupVerification; + private @Mocked FileSnapshotStatusMgr backupStatusMgr; + private @Mocked BackupRestoreUtil backupRestoreUtil; + private @Mocked MetaV2Proxy metaV2Proxy; + private BackupServletV2 resource; + private RestoreServlet restoreResource; + private InstanceInfo instanceInfo; + private static final String backupDate = "201812011000"; + private static final Path location = + Paths.get( + "some_bucket/casstestbackup/1049_fake-app/1808575600", + AbstractBackupPath.BackupFileType.META_V2.toString(), + "1859817645000", + "SNAPPY", + "PLAINTEXT", + "meta_v2_201812011000.json"); + private static Provider pathProvider; + private static IConfiguration configuration; + + @Before + public void setUp() { + Injector injector = Guice.createInjector(new BRTestModule()); + config = injector.getInstance(IConfiguration.class); + instanceInfo = injector.getInstance(InstanceInfo.class); + resource = injector.getInstance(BackupServletV2.class); + restoreResource = injector.getInstance(RestoreServlet.class); + pathProvider = injector.getProvider(AbstractBackupPath.class); + configuration = injector.getInstance(IConfiguration.class); + } + + @Test + public void testBackup() throws Exception { + new Expectations() { + { + snapshotBackup.execute(); + } + }; + + Response response = resource.backup(); + assertEquals(200, response.getStatus()); + assertEquals("[\"ok\"]", response.getEntity()); + assertEquals( + MediaType.APPLICATION_JSON_TYPE, response.getMetadata().get("Content-Type").get(0)); + } + + @Test + public void testRestoreMinimal() throws Exception { + final String dateRange = null; + final String oldRegion = "us-east-1"; + new Expectations() { + { + instanceInfo.getRegion(); + result = oldRegion; + + restoreObj.restore(new DateUtil.DateRange((Instant) any, (Instant) any)); + } + }; + + expectCassandraStartup(); + + Response response = restoreResource.restore(dateRange); + assertEquals(200, response.getStatus()); + assertEquals("[\"ok\"]", response.getEntity()); + assertEquals( + MediaType.APPLICATION_JSON_TYPE, response.getMetadata().get("Content-Type").get(0)); + } + + @Test + public void testRestoreWithDateRange() throws Exception { + final String dateRange = "201101010000,201112312359"; + + new Expectations() { + + { + DateUtil.getDate(dateRange.split(",")[0]); + result = new DateTime(2011, 1, 1, 0, 0).toDate(); + times = 1; + DateUtil.getDate(dateRange.split(",")[1]); + result = new DateTime(2011, 12, 31, 23, 59).toDate(); + times = 1; + restoreObj.restore(new DateUtil.DateRange(dateRange)); + } + }; + + expectCassandraStartup(); + + Response response = restoreResource.restore(dateRange); + assertEquals(200, response.getStatus()); + assertEquals("[\"ok\"]", response.getEntity()); + assertEquals( + MediaType.APPLICATION_JSON_TYPE, response.getMetadata().get("Content-Type").get(0)); + } + + // TODO: create CassandraController interface and inject, instead of static util method + private void 
expectCassandraStartup() { + new Expectations() { + { + config.getCassStartupScript(); + result = "/usr/bin/false"; + config.getHeapNewSize(); + result = "2G"; + config.getHeapSize(); + result = "8G"; + config.getDataFileLocation(); + result = "/var/lib/cassandra/data"; + config.getCommitLogLocation(); + result = "/var/lib/cassandra/commitlog"; + config.getBackupLocation(); + result = "backup"; + config.getCacheLocation(); + result = "/var/lib/cassandra/saved_caches"; + config.getJmxPort(); + result = 7199; + config.getMaxDirectMemory(); + result = "50G"; + } + }; + } + + @Test + public void testValidate() throws Exception { + new Expectations() { + { + backupVerification.verifyBackup( + BackupVersion.SNAPSHOT_META_SERVICE, + anyBoolean, + new DateUtil.DateRange((Instant) any, (Instant) any)); + result = Optional.of(getBackupVerificationResult()); + } + }; + Response response = + resource.validateV2SnapshotByDate( + new DateUtil.DateRange(Instant.now(), Instant.now()).toString(), true); + assertEquals(200, response.getStatus()); + assertEquals( + GsonJsonSerializer.getGson().toJson(getBackupVerificationResult()), + response.getEntity().toString()); + } + + @Test + public void testValidateNoBackups() throws Exception { + new Expectations() { + { + backupVerification.verifyBackup( + BackupVersion.SNAPSHOT_META_SERVICE, + anyBoolean, + new DateUtil.DateRange((Instant) any, (Instant) any)); + result = Optional.empty(); + } + }; + Response response = + resource.validateV2SnapshotByDate( + new DateUtil.DateRange(Instant.now(), Instant.now()).toString(), true); + assertEquals(204, response.getStatus()); + assertEquals( + response.getEntity().toString(), "No valid meta found for provided time range"); + } + + @Test + public void testValidateV2SnapshotByDate() throws Exception { + new Expectations() { + { + backupVerification.verifyBackup( + BackupVersion.SNAPSHOT_META_SERVICE, + anyBoolean, + new DateUtil.DateRange((Instant) any, (Instant) any)); + result = Optional.of(getBackupVerificationResult()); + } + }; + Response response = + resource.validateV2SnapshotByDate( + new DateUtil.DateRange(Instant.now(), Instant.now()).toString(), true); + assertEquals(200, response.getStatus()); + assertEquals( + GsonJsonSerializer.getGson().toJson(getBackupVerificationResult()), + response.getEntity().toString()); + } + + // @Test + // public void testListDateRange() throws Exception { + // Optional abstractBackupPath = getAbstractBackupPath(); + // String dateRange = String.format("%s,%s", + // new SimpleDateFormat("yyyymmddhhmm").format(new Date()) + // , new SimpleDateFormat("yyyymmddhhmm").format(new Date())); + // new Expectations() {{ + // backupRestoreUtil.getLatestValidMetaPath(metaV2Proxy, + // new DateUtil.DateRange((Instant) any, (Instant) any)); result = + // abstractBackupPath; + // + // backupRestoreUtil.getAllFiles( + // abstractBackupPath.get(), + // new DateUtil.DateRange((Instant) any, (Instant) any), metaV2Proxy, + // pathProvider); result = getBackupPathList(); + // }}; + // + // Response response = + // resource.list(dateRange); + // assertEquals(200, response.getStatus()); + // } + + @Test + public void testListDateRangeNoBackups() throws Exception { + String dateRange = + String.format( + "%s,%s", + new SimpleDateFormat("yyyyMMdd").format(new Date()), + new SimpleDateFormat("yyyyMMdd").format(new Date())); + + new Expectations() { + { + backupRestoreUtil.getLatestValidMetaPath( + metaV2Proxy, new DateUtil.DateRange((Instant) any, (Instant) any)); + result = Optional.empty(); + } + }; + 
Response response = resource.list(dateRange); + assertEquals(200, response.getStatus()); + assertEquals(response.getEntity().toString(), "No valid meta found!"); + } + + @Test + public void testBackUpInfo() throws Exception { + List backupMetadataList = new ArrayList<>(); + backupMetadataList.add(getBackupMetaData()); + new Expectations() { + { + backupStatusMgr.getLatestBackupMetadata( + BackupVersion.SNAPSHOT_META_SERVICE, + new DateUtil.DateRange((Instant) any, (Instant) any)); + result = backupMetadataList; + } + }; + Response response = resource.info(backupDate); + assertEquals(200, response.getStatus()); + assertEquals( + GsonJsonSerializer.getGson().toJson(backupMetadataList), + response.getEntity().toString()); + } + + @Test + public void testBackUpInfoNoBackups() { + new Expectations() { + { + backupStatusMgr.getLatestBackupMetadata( + BackupVersion.SNAPSHOT_META_SERVICE, + new DateUtil.DateRange((Instant) any, (Instant) any)); + result = new ArrayList<>(); + } + }; + Response response = resource.info(backupDate); + assertEquals(200, response.getStatus()); + assertEquals( + GsonJsonSerializer.getGson().toJson(new ArrayList<>()), + response.getEntity().toString()); + } + + private static BackupVerificationResult getBackupVerificationResult() { + BackupVerificationResult result = new BackupVerificationResult(); + result.valid = true; + result.manifestAvailable = true; + result.remotePath = "some_random"; + result.filesMatched = 123; + result.snapshotInstant = Instant.EPOCH; + return result; + } + + private static BackupMetadata getBackupMetaData() throws Exception { + BackupMetadata backupMetadata = + new BackupMetadata( + BackupVersion.SNAPSHOT_META_SERVICE, + "123", + new Date(DateUtil.parseInstant(backupDate).toEpochMilli())); + backupMetadata.setCompleted( + new Date( + DateUtil.parseInstant(backupDate) + .plus(30, ChronoUnit.MINUTES) + .toEpochMilli())); + backupMetadata.setStatus(Status.FINISHED); + backupMetadata.setSnapshotLocation(location.toString()); + return backupMetadata; + } + + private static Optional getAbstractBackupPath() throws Exception { + Path path = + Paths.get( + configuration.getDataFileLocation(), + "keyspace1", + "columnfamily1", + "backup", + "mc-1234-Data.db"); + AbstractBackupPath abstractBackupPath = pathProvider.get(); + abstractBackupPath.parseLocal(path.toFile(), AbstractBackupPath.BackupFileType.SST_V2); + return Optional.of(abstractBackupPath); + } + + private static List getBackupPathList() throws Exception { + List abstractBackupPathList = new ArrayList<>(); + Path path = + Paths.get( + configuration.getDataFileLocation(), + "keyspace1", + "columnfamily1", + "backup", + "mc-1234-Data.db"); + AbstractBackupPath abstractBackupPath1 = pathProvider.get(); + abstractBackupPath1.parseLocal(path.toFile(), AbstractBackupPath.BackupFileType.SST_V2); + abstractBackupPathList.add(abstractBackupPath1); + + path = + Paths.get( + configuration.getDataFileLocation(), + "keyspace1", + "columnfamily1", + "backup", + "mc-1234-Data.db"); + AbstractBackupPath abstractBackupPath2 = pathProvider.get(); + abstractBackupPath2.parseLocal( + path.toFile(), AbstractBackupPath.BackupFileType.SNAPSHOT_VERIFIED); + abstractBackupPathList.add(abstractBackupPath2); + return abstractBackupPathList; + } +} diff --git a/priam/src/test/java/com/netflix/priam/resources/CassandraConfigTest.java b/priam/src/test/java/com/netflix/priam/resources/CassandraConfigTest.java index e0521018c..d590c5f48 100644 --- a/priam/src/test/java/com/netflix/priam/resources/CassandraConfigTest.java 
+++ b/priam/src/test/java/com/netflix/priam/resources/CassandraConfigTest.java @@ -17,11 +17,22 @@ package com.netflix.priam.resources; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + import com.google.common.collect.ImmutableList; +import com.google.inject.Guice; import com.netflix.priam.PriamServer; +import com.netflix.priam.backup.BRTestModule; import com.netflix.priam.identity.DoubleRing; import com.netflix.priam.identity.InstanceIdentity; import com.netflix.priam.identity.PriamInstance; +import com.netflix.priam.merics.CassMonitorMetrics; +import java.io.IOException; +import java.net.UnknownHostException; +import java.util.List; +import javax.ws.rs.core.Response; import mockit.Expectations; import mockit.Mocked; import mockit.integration.junit4.JMockit; @@ -29,35 +40,33 @@ import org.junit.Test; import org.junit.runner.RunWith; -import javax.ws.rs.core.Response; -import java.io.IOException; -import java.net.UnknownHostException; -import java.util.List; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.fail; - @RunWith(JMockit.class) -public class CassandraConfigTest -{ +public class CassandraConfigTest { private @Mocked PriamServer priamServer; private @Mocked DoubleRing doubleRing; private CassandraConfig resource; + private InstanceIdentity instanceIdentity; @Before - public void setUp() - { - resource = new CassandraConfig(priamServer, doubleRing); + public void setUp() { + CassMonitorMetrics cassMonitorMetrics = + Guice.createInjector(new BRTestModule()).getInstance(CassMonitorMetrics.class); + instanceIdentity = + Guice.createInjector(new BRTestModule()).getInstance(InstanceIdentity.class); + resource = new CassandraConfig(priamServer, doubleRing, cassMonitorMetrics); } @Test - public void getSeeds(@Mocked final InstanceIdentity identity) throws Exception - { + public void getSeeds(@Mocked final InstanceIdentity identity) throws Exception { final List seeds = ImmutableList.of("seed1", "seed2", "seed3"); new Expectations() { { - priamServer.getId(); result = identity; times = 1; - identity.getSeeds(); result = seeds; times = 1; + priamServer.getInstanceIdentity(); + result = identity; + times = 1; + identity.getSeeds(); + result = seeds; + times = 1; } }; @@ -67,13 +76,16 @@ public void getSeeds(@Mocked final InstanceIdentity identity) throws Exception } @Test - public void getSeeds_notFound(@Mocked final InstanceIdentity identity) throws Exception - { + public void getSeeds_notFound(@Mocked final InstanceIdentity identity) throws Exception { final List seeds = ImmutableList.of(); new Expectations() { { - priamServer.getId(); result = identity; times = 1; - identity.getSeeds(); result = seeds; times = 1; + priamServer.getInstanceIdentity(); + result = identity; + times = 1; + identity.getSeeds(); + result = seeds; + times = 1; } }; @@ -82,12 +94,14 @@ public void getSeeds_notFound(@Mocked final InstanceIdentity identity) throws Ex } @Test - public void getSeeds_handlesUnknownHostException(@Mocked final InstanceIdentity identity) throws Exception - { + public void getSeeds_handlesUnknownHostException(@Mocked final InstanceIdentity identity) + throws Exception { new Expectations() { { - priamServer.getId(); result = identity; - identity.getSeeds(); result = new UnknownHostException(); + priamServer.getInstanceIdentity(); + result = identity; + identity.getSeeds(); + result = new UnknownHostException(); } }; @@ -96,14 +110,20 @@ public void 
getSeeds_handlesUnknownHostException(@Mocked final InstanceIdentity } @Test - public void getToken(@Mocked final InstanceIdentity identity, @Mocked final PriamInstance instance) - { + public void getToken( + @Mocked final InstanceIdentity identity, @Mocked final PriamInstance instance) { final String token = "myToken"; new Expectations() { { - priamServer.getId(); result = identity; times = 2; - identity.getInstance(); result = instance; times = 2; - instance.getToken(); result = token; times = 2; + priamServer.getInstanceIdentity(); + result = identity; + times = 2; + identity.getInstance(); + result = instance; + times = 2; + instance.getToken(); + result = token; + times = 2; } }; @@ -113,14 +133,17 @@ public void getToken(@Mocked final InstanceIdentity identity, @Mocked final Pria } @Test - public void getToken_notFound(@Mocked final InstanceIdentity identity, @Mocked final PriamInstance instance) - { + public void getToken_notFound( + @Mocked final InstanceIdentity identity, @Mocked final PriamInstance instance) { final String token = ""; new Expectations() { { - priamServer.getId(); result = identity; - identity.getInstance(); result = instance; - instance.getToken(); result = token; + priamServer.getInstanceIdentity(); + result = identity; + identity.getInstance(); + result = instance; + instance.getToken(); + result = token; } }; @@ -129,13 +152,16 @@ public void getToken_notFound(@Mocked final InstanceIdentity identity, @Mocked f } @Test - public void getToken_handlesException(@Mocked final InstanceIdentity identity, @Mocked final PriamInstance instance) - { + public void getToken_handlesException( + @Mocked final InstanceIdentity identity, @Mocked final PriamInstance instance) { new Expectations() { { - priamServer.getId(); result = identity; - identity.getInstance(); result = instance; - instance.getToken(); result = new RuntimeException(); + priamServer.getInstanceIdentity(); + result = identity; + identity.getInstance(); + result = instance; + instance.getToken(); + result = new RuntimeException(); } }; @@ -144,12 +170,13 @@ public void getToken_handlesException(@Mocked final InstanceIdentity identity, @ } @Test - public void isReplaceToken(@Mocked final InstanceIdentity identity) - { + public void isReplaceToken(@Mocked final InstanceIdentity identity) { new Expectations() { { - priamServer.getId(); result = identity; - identity.isReplace(); result = true; + priamServer.getInstanceIdentity(); + result = identity; + identity.isReplace(); + result = true; } }; @@ -159,12 +186,13 @@ public void isReplaceToken(@Mocked final InstanceIdentity identity) } @Test - public void isReplaceToken_handlesException(@Mocked final InstanceIdentity identity) - { + public void isReplaceToken_handlesException(@Mocked final InstanceIdentity identity) { new Expectations() { { - priamServer.getId(); result = identity; - identity.isReplace(); result = new RuntimeException(); + priamServer.getInstanceIdentity(); + result = identity; + identity.isReplace(); + result = new RuntimeException(); } }; @@ -173,13 +201,14 @@ public void isReplaceToken_handlesException(@Mocked final InstanceIdentity ident } @Test - public void getReplacedAddress(@Mocked final InstanceIdentity identity) - { - final String replacedIp = "127.0.0.1"; + public void getReplacedAddress(@Mocked final InstanceIdentity identity) { + final String replacedIp = "127.0.0.1"; new Expectations() { { - priamServer.getId(); result = identity; - identity.getReplacedIp(); result = replacedIp; + priamServer.getInstanceIdentity(); + result = 
identity; + identity.getReplacedIp(); + result = replacedIp; + } + }; @@ -187,58 +216,82 @@ public void getReplacedAddress(@Mocked final InstanceIdentity identity) assertEquals(200, response.getStatus()); assertEquals(replacedIp, response.getEntity()); } - + + @Test + public void setReplacedIp() { + new Expectations() { + { + priamServer.getInstanceIdentity(); + result = instanceIdentity; + } + }; + + Response response = resource.setReplacedIp("127.0.0.1"); + assertEquals(200, response.getStatus()); + assertEquals("127.0.0.1", instanceIdentity.getReplacedIp()); + assertTrue(instanceIdentity.isReplace()); + + response = resource.setReplacedIp(null); + assertEquals(400, response.getStatus()); + } + @Test - public void doubleRing() throws Exception - { - new Expectations() {{ - doubleRing.backup(); - doubleRing.doubleSlots(); - }}; + public void doubleRing() throws Exception { + new Expectations() { + { + doubleRing.backup(); + doubleRing.doubleSlots(); + } + }; Response response = resource.doubleRing(); assertEquals(200, response.getStatus()); } @Test - public void doubleRing_ioExceptionInBackup() throws Exception - { + public void doubleRing_ioExceptionInBackup() throws Exception { final IOException exception = new IOException(); - new Expectations() {{ - doubleRing.backup(); result = exception; - doubleRing.restore(); - }}; - - try - { - resource.doubleRing(); - fail("Excepted RuntimeException"); - } - catch (RuntimeException e) - { - assertEquals(exception, e.getCause()); + new Expectations() { + { + doubleRing.backup(); + result = exception; + doubleRing.restore(); + } + }; + + try { + resource.doubleRing(); + fail("Expected RuntimeException"); + } catch (RuntimeException e) { + assertEquals(exception, e.getCause()); } } - @Test(expected=IOException.class) - public void doubleRing_ioExceptionInRestore() throws Exception - { - new Expectations() {{ - doubleRing.backup(); result = new IOException(); - doubleRing.restore(); result = new IOException(); - }}; + @Test(expected = IOException.class) + public void doubleRing_ioExceptionInRestore() throws Exception { + new Expectations() { + { + doubleRing.backup(); + result = new IOException(); + doubleRing.restore(); + result = new IOException(); + } + }; resource.doubleRing(); } - @Test(expected=ClassNotFoundException.class) - public void doubleRing_classNotFoundExceptionInRestore() throws Exception - { - new Expectations() {{ - doubleRing.backup(); result = new IOException(); - doubleRing.restore(); result = new ClassNotFoundException(); - }}; + @Test(expected = ClassNotFoundException.class) + public void doubleRing_classNotFoundExceptionInRestore() throws Exception { + new Expectations() { + { + doubleRing.backup(); + result = new IOException(); + doubleRing.restore(); + result = new ClassNotFoundException(); + } + }; resource.doubleRing(); } -} \ No newline at end of file +} diff --git a/priam/src/test/java/com/netflix/priam/resources/PriamConfigTest.java b/priam/src/test/java/com/netflix/priam/resources/PriamConfigTest.java new file mode 100644 index 000000000..cead22ab1 --- /dev/null +++ b/priam/src/test/java/com/netflix/priam/resources/PriamConfigTest.java @@ -0,0 +1,116 @@ +/* + * Copyright 2017 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +package com.netflix.priam.resources; + +import static org.junit.Assert.*; + +import com.netflix.priam.PriamServer; +import com.netflix.priam.config.FakeConfiguration; +import com.netflix.priam.utils.GsonJsonSerializer; +import java.util.HashMap; +import java.util.Map; +import javax.ws.rs.core.Response; +import mockit.Expectations; +import mockit.Mocked; +import mockit.integration.junit4.JMockit; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(JMockit.class) +public class PriamConfigTest { + private @Mocked PriamServer priamServer; + + private PriamConfig resource; + + private FakeConfiguration fakeConfiguration; + + @Before + public void setUp() { + resource = new PriamConfig(priamServer); + fakeConfiguration = new FakeConfiguration("cass_test"); + fakeConfiguration.fakeProperties.put("test.prop", "test_value"); + } + + @Test + public void getPriamConfig() { + new Expectations() { + { + priamServer.getConfiguration(); + result = fakeConfiguration; + times = 3; + } + }; + + Response response = resource.getPriamConfig("all"); + assertEquals(200, response.getStatus()); + + Map result = + GsonJsonSerializer.getGson().fromJson(response.getEntity().toString(), Map.class); + assertNotNull(result); + assertTrue(!result.isEmpty()); + + final Map expected = new HashMap<>(); + expected.put("backupLocation", "casstestbackup"); + String expectedJsonString = GsonJsonSerializer.getGson().toJson(expected); + response = resource.getPriamConfigByName("all", "backupLocation"); + assertEquals(200, response.getStatus()); + assertEquals(expectedJsonString, response.getEntity()); + result = GsonJsonSerializer.getGson().fromJson(response.getEntity().toString(), Map.class); + assertEquals(result, expected); + + Response badResponse = resource.getPriamConfigByName("all", "getUnrealThing"); + assertEquals(404, badResponse.getStatus()); + } + + @Test + public void getProperty() { + final Map expected = new HashMap<>(); + expected.put("test.prop", "test_value"); + new Expectations() { + { + priamServer.getConfiguration(); + result = fakeConfiguration; + times = 3; + } + }; + + String expectedJsonString = GsonJsonSerializer.getGson().toJson(expected); + Response response = resource.getProperty("test.prop", null); + assertEquals(200, response.getStatus()); + assertEquals(expectedJsonString, response.getEntity()); + + Map result = + GsonJsonSerializer.getGson().fromJson(response.getEntity().toString(), Map.class); + assertNotNull(result); + assertTrue(!result.isEmpty()); + + Response defaultResponse = resource.getProperty("not.a.property", "NOVALUE"); + expected.clear(); + expected.put("not.a.property", "NOVALUE"); + expectedJsonString = GsonJsonSerializer.getGson().toJson(expected); + assertEquals(200, defaultResponse.getStatus()); + assertEquals(expectedJsonString, defaultResponse.getEntity()); + result = + GsonJsonSerializer.getGson() + .fromJson(defaultResponse.getEntity().toString(), Map.class); + assertEquals(result, expected); + + Response badResponse = resource.getProperty("not.a.property", null); + assertEquals(404, 
badResponse.getStatus()); + } +} diff --git a/priam/src/test/java/com/netflix/priam/resources/PriamInstanceResourceTest.java b/priam/src/test/java/com/netflix/priam/resources/PriamInstanceResourceTest.java index d0d9ad59b..9d1cde4cc 100644 --- a/priam/src/test/java/com/netflix/priam/resources/PriamInstanceResourceTest.java +++ b/priam/src/test/java/com/netflix/priam/resources/PriamInstanceResourceTest.java @@ -17,55 +17,58 @@ package com.netflix.priam.resources; -import java.util.List; - -import javax.ws.rs.WebApplicationException; -import javax.ws.rs.core.Response; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; -import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; import com.netflix.priam.config.IConfiguration; import com.netflix.priam.identity.IPriamInstanceFactory; import com.netflix.priam.identity.PriamInstance; - +import com.netflix.priam.identity.config.InstanceInfo; +import javax.ws.rs.WebApplicationException; +import javax.ws.rs.core.Response; import mockit.Expectations; import mockit.Mocked; import mockit.integration.junit4.JMockit; - import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.fail; - @RunWith(JMockit.class) -public class PriamInstanceResourceTest -{ +public class PriamInstanceResourceTest { private static final String APP_NAME = "myApp"; private static final int NODE_ID = 3; private @Mocked IConfiguration config; private @Mocked IPriamInstanceFactory factory; + private @Mocked InstanceInfo instanceInfo; private PriamInstanceResource resource; @Before - public void setUp() - { - resource = new PriamInstanceResource(config, factory); + public void setUp() { + resource = new PriamInstanceResource(config, factory, instanceInfo); } @Test - public void getInstances(@Mocked final PriamInstance instance1, @Mocked final PriamInstance instance2, @Mocked final PriamInstance instance3) - { + public void getInstances( + @Mocked final PriamInstance instance1, + @Mocked final PriamInstance instance2, + @Mocked final PriamInstance instance3) { new Expectations() { - List instances = ImmutableList.of(instance1, instance2, instance3); + final ImmutableSet instances = + ImmutableSet.of(instance1, instance2, instance3); { - config.getAppName(); result = APP_NAME; - factory.getAllIds(APP_NAME); result = instances; - instance1.toString(); result = "instance1"; - instance2.toString(); result = "instance2"; - instance3.toString(); result = "instance3"; + config.getAppName(); + result = APP_NAME; + factory.getAllIds(APP_NAME); + result = instances; + instance1.toString(); + result = "instance1"; + instance2.toString(); + result = "instance2"; + instance3.toString(); + result = "instance3"; } }; @@ -73,14 +76,16 @@ public void getInstances(@Mocked final PriamInstance instance1, @Mocked final Pr } @Test - public void getInstance(@Mocked final PriamInstance instance) - { + public void getInstance(@Mocked final PriamInstance instance) { final String expected = "plain text describing the instance"; new Expectations() { { - config.getAppName(); result = APP_NAME; - factory.getInstance(APP_NAME, config.getDC(), NODE_ID); result = instance; - instance.toString(); result = expected; + config.getAppName(); + result = APP_NAME; + factory.getInstance(APP_NAME, instanceInfo.getRegion(), NODE_ID); + result = instance; + instance.toString(); + result = expected; } }; @@ -88,27 +93,28 @@ public void getInstance(@Mocked final 
PriamInstance instance) } @Test - public void getInstance_notFound() - { - new Expectations() {{ - config.getAppName(); result = APP_NAME; - factory.getInstance(APP_NAME, config.getDC(), NODE_ID); result = null; - }}; - - try - { + public void getInstance_notFound() { + new Expectations() { + { + config.getAppName(); + result = APP_NAME; + factory.getInstance(APP_NAME, instanceInfo.getRegion(), NODE_ID); + result = null; + } + }; + + try { resource.getInstance(NODE_ID); fail("Expected WebApplicationException thrown"); - } catch(WebApplicationException e) - { + } catch (WebApplicationException e) { assertEquals(404, e.getResponse().getStatus()); - assertEquals("No priam instance with id " + NODE_ID + " found", e.getResponse().getEntity()); + assertEquals( + "No priam instance with id " + NODE_ID + " found", e.getResponse().getEntity()); } } @Test - public void createInstance(@Mocked final PriamInstance instance) - { + public void createInstance(@Mocked final PriamInstance instance) { final String instanceID = "i-abc123"; final String hostname = "dom.com"; final String ip = "123.123.123.123"; @@ -116,27 +122,31 @@ public void createInstance(@Mocked final PriamInstance instance) final String token = "1234567890"; new Expectations() { - { - config.getAppName(); result = APP_NAME; - factory.create(APP_NAME, NODE_ID, instanceID, hostname, ip, rack, null, token); result = instance; - instance.getId(); result = NODE_ID; - } + { + config.getAppName(); + result = APP_NAME; + factory.create(APP_NAME, NODE_ID, instanceID, hostname, ip, rack, null, token); + result = instance; + instance.getId(); + result = NODE_ID; + } }; Response response = resource.createInstance(NODE_ID, instanceID, hostname, ip, rack, token); assertEquals(201, response.getStatus()); - assertEquals("/"+NODE_ID, response.getMetadata().getFirst("location").toString()); + assertEquals("/" + NODE_ID, response.getMetadata().getFirst("location").toString()); } @Test - public void deleteInstance(@Mocked final PriamInstance instance) - { + public void deleteInstance(@Mocked final PriamInstance instance) { new Expectations() { - { - config.getAppName(); result = APP_NAME; - factory.getInstance(APP_NAME, config.getDC(), NODE_ID); result = instance; - factory.delete(instance); - } + { + config.getAppName(); + result = APP_NAME; + factory.getInstance(APP_NAME, instanceInfo.getRegion(), NODE_ID); + result = instance; + factory.delete(instance); + } }; Response response = resource.deleteInstance(NODE_ID); @@ -144,21 +154,23 @@ public void deleteInstance(@Mocked final PriamInstance instance) } @Test - public void deleteInstance_notFound() - { - new Expectations() {{ - config.getAppName(); result = APP_NAME; - factory.getInstance(APP_NAME, config.getDC(), NODE_ID); result = null; - }}; - - try - { + public void deleteInstance_notFound() { + new Expectations() { + { + config.getAppName(); + result = APP_NAME; + factory.getInstance(APP_NAME, instanceInfo.getRegion(), NODE_ID); + result = null; + } + }; + + try { resource.getInstance(NODE_ID); fail("Expected WebApplicationException thrown"); - } catch(WebApplicationException e) - { + } catch (WebApplicationException e) { assertEquals(404, e.getResponse().getStatus()); - assertEquals("No priam instance with id " + NODE_ID + " found", e.getResponse().getEntity()); + assertEquals( + "No priam instance with id " + NODE_ID + " found", e.getResponse().getEntity()); } } -} \ No newline at end of file +} diff --git a/priam/src/test/java/com/netflix/priam/restore/TestPostRestoreHook.java 
b/priam/src/test/java/com/netflix/priam/restore/TestPostRestoreHook.java index 5eb7fadc6..68afaaa17 100644 --- a/priam/src/test/java/com/netflix/priam/restore/TestPostRestoreHook.java +++ b/priam/src/test/java/com/netflix/priam/restore/TestPostRestoreHook.java @@ -18,39 +18,40 @@ import com.google.inject.Guice; import com.google.inject.Injector; -import com.netflix.priam.config.IConfiguration; import com.netflix.priam.TestModule; +import com.netflix.priam.config.IConfiguration; +import java.io.File; import org.apache.commons.io.FileUtils; import org.junit.After; import org.junit.Assert; import org.junit.Before; import org.junit.Test; -import java.io.File; - public class TestPostRestoreHook { - @Before @After + @Before + @After public void setup() { Injector inject = Guice.createInjector(new TestModule()); IConfiguration configuration = inject.getInstance(IConfiguration.class); - //ensure heartbeat and done files are not present + // ensure heartbeat and done files are not present File heartBeatFile = new File(configuration.getPostRestoreHookHeartbeatFileName()); - if(heartBeatFile.exists()) { + if (heartBeatFile.exists()) { heartBeatFile.delete(); } File doneFile = new File(configuration.getPostRestoreHookDoneFileName()); - if(doneFile.exists()) { + if (doneFile.exists()) { doneFile.delete(); } } @Test - /** - * Test to validate hasValidParameters. Expected to pass since none of the parameters in FakeConfiguration are blank - */ + /* + Test to validate hasValidParameters. Expected to pass since none of the parameters in + FakeConfiguration are blank + */ public void testPostRestoreHookValidParameters() { Injector inject = Guice.createInjector(new TestModule()); IPostRestoreHook postRestoreHook = inject.getInstance(IPostRestoreHook.class); @@ -58,60 +59,71 @@ public void testPostRestoreHookValidParameters() { } @Test - /** - * Test to validate execute method. This is a happy path since heart beat file is emited as soon as test case starts, and postrestorehook completes execution once the child process completes execution. - * Test fails in case of any exception. - */ + /* + Test to validate execute method. This is a happy path since heart beat file is emitted as soon + as test case starts, and PostRestoreHook completes execution once the child process completes + execution. Test fails in case of any exception. + */ public void testPostRestoreHookExecuteHappyPath() throws Exception { Injector inject = Guice.createInjector(new TestModule()); IPostRestoreHook postRestoreHook = inject.getInstance(IPostRestoreHook.class); IConfiguration configuration = inject.getInstance(IConfiguration.class); - startHeartBeatThreadWithDelay(0, configuration.getPostRestoreHookHeartbeatFileName(), configuration.getPostRestoreHookDoneFileName()); + startHeartBeatThreadWithDelay( + 0, + configuration.getPostRestoreHookHeartbeatFileName(), + configuration.getPostRestoreHookDoneFileName()); postRestoreHook.execute(); } @Test - /** - * Test to validate execute method. This is a variant of above method, where heartbeat is produced after an initial delay. This delay causes PostRestoreHook to terminate the child process since there is - * no heartbeat multiple times, and eventually once the heartbeat starts, PostRestoreHook waits for the child process to complete execution. - * Test fails in case of any exception. - */ + /* + Test to validate execute method. This is a variant of above method, where heartbeat is + produced after an initial delay. 
This delay causes PostRestoreHook to terminate the child + process since there is no heartbeat multiple times, and eventually once the heartbeat starts, + PostRestoreHook waits for the child process to complete execution. Test fails in case of any + exception. + */ public void testPostRestoreHookExecuteHeartBeatDelay() throws Exception { Injector inject = Guice.createInjector(new TestModule()); IPostRestoreHook postRestoreHook = inject.getInstance(IPostRestoreHook.class); IConfiguration configuration = inject.getInstance(IConfiguration.class); - startHeartBeatThreadWithDelay(1000, configuration.getPostRestoreHookHeartbeatFileName(), configuration.getPostRestoreHookDoneFileName()); + startHeartBeatThreadWithDelay( + 1000, + configuration.getPostRestoreHookHeartbeatFileName(), + configuration.getPostRestoreHookDoneFileName()); postRestoreHook.execute(); } /** * Starts a thread to emit heartbeat and finish with a done file. + * * @param delayInMs any start up delay if needed * @param heartBeatfileName name of the heart beat file * @param doneFileName name of the done file */ - private void startHeartBeatThreadWithDelay(long delayInMs, String heartBeatfileName, String doneFileName) { - Thread heartBeatEmitThread = new Thread() { - public void run() { - File heartBeatFile = new File(heartBeatfileName); - try { - //add a delay to heartbeat - Thread.sleep(delayInMs); - if (!heartBeatFile.exists() && !heartBeatFile.createNewFile()) { - Assert.fail("Unable to create heartbeat file"); - } - for(int i = 0; i < 10; i++) { - FileUtils.touch(heartBeatFile); - Thread.sleep(1000); - } + private void startHeartBeatThreadWithDelay( + long delayInMs, String heartBeatfileName, String doneFileName) { + Thread heartBeatEmitThread = + new Thread( + () -> { + File heartBeatFile = new File(heartBeatfileName); + try { + // add a delay to heartbeat + Thread.sleep(delayInMs); + if (!heartBeatFile.exists() && !heartBeatFile.createNewFile()) { + Assert.fail("Unable to create heartbeat file"); + } + for (int i = 0; i < 10; i++) { + FileUtils.touch(heartBeatFile); + Thread.sleep(1000); + } - File doneFile = new File(doneFileName); - doneFile.createNewFile(); - } catch (Exception ex) { - Assert.fail(ex.getMessage()); - } - } - }; + File doneFile = new File(doneFileName); + doneFile.createNewFile(); + } catch (Exception ex) { + Assert.fail(ex.getMessage()); + } + }); heartBeatEmitThread.start(); } diff --git a/priam/src/test/java/com/netflix/priam/restore/TestRestore.java b/priam/src/test/java/com/netflix/priam/restore/TestRestore.java new file mode 100644 index 000000000..aeb169911 --- /dev/null +++ b/priam/src/test/java/com/netflix/priam/restore/TestRestore.java @@ -0,0 +1,162 @@ +/* + * Copyright 2018 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.priam.restore; + +import com.google.inject.Guice; +import com.google.inject.Injector; +import com.netflix.priam.backup.BRTestModule; +import com.netflix.priam.backup.FakeBackupFileSystem; +import com.netflix.priam.backup.Status; +import com.netflix.priam.config.FakeConfiguration; +import com.netflix.priam.config.IConfiguration; +import com.netflix.priam.health.InstanceState; +import com.netflix.priam.identity.config.InstanceInfo; +import com.netflix.priam.utils.DateUtil; +import java.io.IOException; +import java.util.ArrayList; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +public class TestRestore { + private static FakeBackupFileSystem filesystem; + private static ArrayList fileList = new ArrayList<>(); + private static FakeConfiguration conf; + private static String region; + private static Restore restore; + private static InstanceState instanceState; + + @BeforeClass + public static void setup() throws InterruptedException, IOException { + Injector injector = Guice.createInjector(new BRTestModule()); + if (filesystem == null) filesystem = injector.getInstance(FakeBackupFileSystem.class); + if (conf == null) conf = (FakeConfiguration) injector.getInstance(IConfiguration.class); + region = injector.getInstance(InstanceInfo.class).getRegion(); + if (restore == null) restore = injector.getInstance(Restore.class); + if (instanceState == null) instanceState = injector.getInstance(InstanceState.class); + } + + private static void populateBackupFileSystem(String baseDir) { + fileList.clear(); + fileList.add(baseDir + "/" + region + "/fakecluster/123456/201108110030/META/meta.json"); + fileList.add( + baseDir + "/" + region + "/fakecluster/123456/201108110030/SNAP/ks1/cf1/f1.db"); + fileList.add( + baseDir + "/" + region + "/fakecluster/123456/201108110030/SNAP/ks1/cf1/f2.db"); + fileList.add( + baseDir + "/" + region + "/fakecluster/123456/201108110030/SNAP/ks2/cf1/f2.db"); + fileList.add(baseDir + "/" + region + "/fakecluster/123456/201108110530/SST/ks2/cf1/f3.db"); + fileList.add(baseDir + "/" + region + "/fakecluster/123456/201108110600/SST/ks2/cf1/f4.db"); + filesystem.setupTest(fileList); + conf.setRestorePrefix("RESTOREBUCKET/" + baseDir + "/" + region + "/fakecluster"); + } + + @Test + public void testRestore() throws Exception { + populateBackupFileSystem("test_backup"); + String dateRange = "201108110030,201108110530"; + restore.restore(new DateUtil.DateRange(dateRange)); + Assert.assertTrue(filesystem.downloadedFiles.contains(fileList.get(0))); + Assert.assertTrue(filesystem.downloadedFiles.contains(fileList.get(1))); + Assert.assertTrue(filesystem.downloadedFiles.contains(fileList.get(2))); + Assert.assertTrue(filesystem.downloadedFiles.contains(fileList.get(3))); + Assert.assertFalse(filesystem.downloadedFiles.contains(fileList.get(4))); + Assert.assertFalse(filesystem.downloadedFiles.contains(fileList.get(5))); + Assert.assertEquals(Status.FINISHED, instanceState.getRestoreStatus().getStatus()); + } + + @Test + public void testRestoreWithIncremental() throws Exception { + populateBackupFileSystem("test_backup"); + String dateRange = "201108110030,201108110730"; + restore.restore(new DateUtil.DateRange(dateRange)); + Assert.assertTrue(filesystem.downloadedFiles.contains(fileList.get(0))); + Assert.assertTrue(filesystem.downloadedFiles.contains(fileList.get(1))); + Assert.assertTrue(filesystem.downloadedFiles.contains(fileList.get(2))); + Assert.assertTrue(filesystem.downloadedFiles.contains(fileList.get(3))); + 
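// The asserts above and below flip to assertTrue relative to testRestore because the
// range passed to restore() is "201108110030,201108110730" (yyyyMMddHHmm start,end),
// which now covers the incremental SST uploads stamped 201108110530 and 201108110600.
// A minimal sketch of that containment check in plain java.time, assuming the same
// timestamp format that DateUtil.DateRange parses; illustration only, not the patch:
java.time.format.DateTimeFormatter fmt = java.time.format.DateTimeFormatter.ofPattern("yyyyMMddHHmm");
java.time.LocalDateTime windowStart = java.time.LocalDateTime.parse("201108110030", fmt);
java.time.LocalDateTime windowEnd = java.time.LocalDateTime.parse("201108110730", fmt);
java.time.LocalDateTime sstStamp = java.time.LocalDateTime.parse("201108110600", fmt);
// Evaluates to true, so f4.db is expected among the downloaded files for this range.
boolean sstInWindow = !sstStamp.isBefore(windowStart) && !sstStamp.isAfter(windowEnd);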
Assert.assertTrue(filesystem.downloadedFiles.contains(fileList.get(4))); + Assert.assertTrue(filesystem.downloadedFiles.contains(fileList.get(5))); + Assert.assertEquals(Status.FINISHED, instanceState.getRestoreStatus().getStatus()); + } + + @Test + public void testRestoreLatestWithEmptyMeta() throws Exception { + populateBackupFileSystem("test_backup"); + String metafile = + "test_backup/" + region + "/fakecluster/123456/201108110130/META/meta.json"; + filesystem.addFile(metafile); + String dateRange = "201108110030,201108110530"; + restore.restore(new DateUtil.DateRange(dateRange)); + Assert.assertFalse(filesystem.downloadedFiles.contains(fileList.get(0))); + Assert.assertTrue(filesystem.downloadedFiles.contains(metafile)); + Assert.assertFalse(filesystem.downloadedFiles.contains(fileList.get(1))); + Assert.assertFalse(filesystem.downloadedFiles.contains(fileList.get(2))); + Assert.assertFalse(filesystem.downloadedFiles.contains(fileList.get(3))); + Assert.assertFalse(filesystem.downloadedFiles.contains(fileList.get(4))); + Assert.assertFalse(filesystem.downloadedFiles.contains(fileList.get(5))); + Assert.assertEquals(Status.FINISHED, instanceState.getRestoreStatus().getStatus()); + Assert.assertEquals(metafile, instanceState.getRestoreStatus().getSnapshotMetaFile()); + } + + @Test + public void testRestoreLatest() throws Exception { + populateBackupFileSystem("test_backup"); + String metafile = + "test_backup/" + region + "/fakecluster/123456/201108110130/META/meta.json"; + filesystem.addFile(metafile); + String snapFile = + "test_backup/" + region + "/fakecluster/123456/201108110130/SNAP/ks1/cf1/f9.db"; + filesystem.addFile(snapFile); + String dateRange = "201108110030,201108110530"; + restore.restore(new DateUtil.DateRange(dateRange)); + Assert.assertFalse(filesystem.downloadedFiles.contains(fileList.get(0))); + Assert.assertTrue(filesystem.downloadedFiles.contains(metafile)); + Assert.assertTrue(filesystem.downloadedFiles.contains(snapFile)); + Assert.assertFalse(filesystem.downloadedFiles.contains(fileList.get(1))); + Assert.assertFalse(filesystem.downloadedFiles.contains(fileList.get(2))); + Assert.assertFalse(filesystem.downloadedFiles.contains(fileList.get(3))); + Assert.assertFalse(filesystem.downloadedFiles.contains(fileList.get(4))); + Assert.assertFalse(filesystem.downloadedFiles.contains(fileList.get(5))); + Assert.assertEquals(Status.FINISHED, instanceState.getRestoreStatus().getStatus()); + Assert.assertEquals(metafile, instanceState.getRestoreStatus().getSnapshotMetaFile()); + } + + @Test + public void testNoSnapshots() throws Exception { + populateBackupFileSystem("test_backup"); + filesystem.setupTest(fileList); + String dateRange = "201109110030,201109110530"; + restore.restore(new DateUtil.DateRange(dateRange)); + Assert.assertEquals(Status.FAILED, instanceState.getRestoreStatus().getStatus()); + } + + @Test + public void testRestoreFromDiffCluster() throws Exception { + populateBackupFileSystem("test_backup_new"); + String dateRange = "201108110030,201108110530"; + restore.restore(new DateUtil.DateRange(dateRange)); + System.out.println("Downloaded files: " + filesystem.downloadedFiles); + Assert.assertTrue(filesystem.downloadedFiles.contains(fileList.get(0))); + Assert.assertTrue(filesystem.downloadedFiles.contains(fileList.get(1))); + Assert.assertTrue(filesystem.downloadedFiles.contains(fileList.get(2))); + Assert.assertTrue(filesystem.downloadedFiles.contains(fileList.get(3))); + Assert.assertFalse(filesystem.downloadedFiles.contains(fileList.get(4))); + 
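// Every fixture key in these tests follows one layout, visible in populateBackupFileSystem:
//   <baseDir>/<region>/<cluster>/<token>/<yyyyMMddHHmm>/<META|SNAP|SST>/<keyspace>/<cf>/<file>
// A small hypothetical helper (not a Priam API) that rebuilds one such key for this
// test's "test_backup_new" base dir:
java.util.function.BiFunction<String, String, String> remoteKey =
        (type, file) ->
                "test_backup_new/" + region + "/fakecluster/123456/201108110030/" + type + "/ks1/cf1/" + file;
// remoteKey.apply("SNAP", "f1.db") equals fileList.get(1) after populateBackupFileSystem("test_backup_new").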
Assert.assertFalse(filesystem.downloadedFiles.contains(fileList.get(5))); + Assert.assertEquals(Status.FINISHED, instanceState.getRestoreStatus().getStatus()); + } +} diff --git a/priam/src/test/java/com/netflix/priam/scheduler/TestGuiceSingleton.java b/priam/src/test/java/com/netflix/priam/scheduler/TestGuiceSingleton.java index d6fb5b74a..e0eff0417 100644 --- a/priam/src/test/java/com/netflix/priam/scheduler/TestGuiceSingleton.java +++ b/priam/src/test/java/com/netflix/priam/scheduler/TestGuiceSingleton.java @@ -17,18 +17,15 @@ package com.netflix.priam.scheduler; -import org.junit.Test; - import com.google.inject.AbstractModule; import com.google.inject.Guice; import com.google.inject.Injector; import com.google.inject.Singleton; +import org.junit.Test; -public class TestGuiceSingleton -{ +public class TestGuiceSingleton { @Test - public void testSingleton() - { + public void testSingleton() { Injector injector = Guice.createInjector(new GModules()); injector.getInstance(EmptryInterface.class).print(); injector.getInstance(EmptryInterface.class).print(); @@ -38,36 +35,29 @@ public void testSingleton() printInjected(); printInjected(); } - - public void printInjected() - { + + public void printInjected() { Injector injector = Guice.createInjector(new GModules()); injector.getInstance(EmptryInterface.class).print(); } - public interface EmptryInterface - { - String print(); + interface EmptryInterface { + void print(); } @Singleton - public static class GuiceSingleton implements EmptryInterface - { + public static class GuiceSingleton implements EmptryInterface { - public String print() - { + public void print() { System.out.println(this.toString()); - return this.toString(); } } - public static class GModules extends AbstractModule - { + public static class GModules extends AbstractModule { @Override - protected void configure() - { + protected void configure() { bind(EmptryInterface.class).to(GuiceSingleton.class).asEagerSingleton(); } - } } diff --git a/priam/src/test/java/com/netflix/priam/scheduler/TestScheduler.java b/priam/src/test/java/com/netflix/priam/scheduler/TestScheduler.java index 6d0ed0eba..e2da965eb 100644 --- a/priam/src/test/java/com/netflix/priam/scheduler/TestScheduler.java +++ b/priam/src/test/java/com/netflix/priam/scheduler/TestScheduler.java @@ -21,24 +21,20 @@ import com.google.inject.Inject; import com.google.inject.Injector; import com.google.inject.Singleton; -import com.netflix.priam.config.IConfiguration; import com.netflix.priam.TestModule; +import com.netflix.priam.config.IConfiguration; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import org.junit.Assert; import org.junit.Ignore; import org.junit.Test; -import javax.management.MBeanServerFactory; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; - -public class TestScheduler -{ +public class TestScheduler { // yuck, but marginally better than using Thread.sleep private static CountDownLatch latch; @Test - public void testSchedule() throws Exception - { + public void testSchedule() throws Exception { latch = new CountDownLatch(1); Injector inject = Guice.createInjector(new TestModule()); PriamScheduler scheduler = inject.getInstance(PriamScheduler.class); @@ -50,9 +46,9 @@ public void testSchedule() throws Exception } @Test - @Ignore("not sure what this test really does, except test countdown latch and thread context switching") - public void testSingleInstanceSchedule() throws Exception - { + @Ignore( + "not sure
what this test really does, except test countdown latch and thread context switching") + public void testSingleInstanceSchedule() throws Exception { latch = new CountDownLatch(3); Injector inject = Guice.createInjector(new TestModule()); PriamScheduler scheduler = inject.getInstance(PriamScheduler.class); @@ -65,65 +61,54 @@ public void testSingleInstanceSchedule() throws Exception } @Ignore - public static class TestTask extends Task - { + public static class TestTask extends Task { @Inject - public TestTask(IConfiguration config) - { - // todo: mock the MBeanServer instead, but this will prevent exceptions due to duplicate registrations - super(config, MBeanServerFactory.newMBeanServer()); + public TestTask(IConfiguration config) { + // todo: mock the MBeanServer instead, but this will prevent exceptions due to duplicate + // registrations + super(config); } @Override - public void execute() - { + public void execute() { latch.countDown(); } @Override - public String getName() - { + public String getName() { return "test"; } - } @Ignore @Singleton - public static class SingleTestTask extends Task - { + public static class SingleTestTask extends Task { @Inject - public SingleTestTask(IConfiguration config) - { - super(config, MBeanServerFactory.newMBeanServer()); + public SingleTestTask(IConfiguration config) { + super(config); } - public static int count =0; + public static int count = 0; + @Override - public void execute() - { + public void execute() { ++count; latch.countDown(); - try - { + try { // todo : why is this sleep important? - Thread.sleep(55);//5sec - } - catch (InterruptedException e) - { + Thread.sleep(55); // 5sec + } catch (InterruptedException e) { // TODO Auto-generated catch block e.printStackTrace(); } } @Override - public String getName() - { + public String getName() { return "test2"; } - public static TaskTimer getTimer() - { + public static TaskTimer getTimer() { return new SimpleTimer("test2", 11L); } } diff --git a/priam/src/test/java/com/netflix/priam/scheduler/TestSimpleTimer.java b/priam/src/test/java/com/netflix/priam/scheduler/TestSimpleTimer.java new file mode 100644 index 000000000..648a483ca --- /dev/null +++ b/priam/src/test/java/com/netflix/priam/scheduler/TestSimpleTimer.java @@ -0,0 +1,59 @@ +package com.netflix.priam.scheduler; + +import static org.junit.jupiter.api.Assertions.assertThrows; + +import com.google.common.truth.Truth; +import java.text.ParseException; +import java.time.Instant; +import java.time.temporal.ChronoUnit; +import java.util.Date; +import org.junit.Test; +import org.quartz.Trigger; + +public class TestSimpleTimer { + private static final int PERIOD = 10; + private static final Instant START = Instant.EPOCH.plus(5, ChronoUnit.SECONDS); + + @Test + public void sunnyDay() throws ParseException { + assertions(new SimpleTimer("foo", PERIOD, START).getTrigger(), START); + } + + @Test + public void startBeforeEpoch() { + assertThrows( + IllegalArgumentException.class, + () -> new SimpleTimer("foo", PERIOD, Instant.EPOCH.minus(5, ChronoUnit.SECONDS))); + } + + @Test + public void startAtEpoch() throws ParseException { + assertions(new SimpleTimer("foo", PERIOD, Instant.EPOCH).getTrigger(), Instant.EPOCH); + } + + @Test + public void startMoreThanOnePeriodAfterEpoch() throws ParseException { + Instant start = Instant.EPOCH.plus(2 * PERIOD, ChronoUnit.SECONDS); + assertions(new SimpleTimer("foo", PERIOD, start).getTrigger(), start); + } + + @Test + public void negativePeriod() { + assertThrows(IllegalArgumentException.class, () -> new 
SimpleTimer("foo", -PERIOD, START)); + } + + @Test + public void zeroPeriod() { + assertThrows(IllegalArgumentException.class, () -> new SimpleTimer("foo", 0, START)); + } + + private void assertions(Trigger trigger, Instant start) { + Instant now = Instant.now(); + Instant nextFireTime = trigger.getFireTimeAfter(Date.from(now)).toInstant(); + Truth.assertThat(nextFireTime.getEpochSecond() % PERIOD) + .isEqualTo(start.getEpochSecond() % PERIOD); + Truth.assertThat(nextFireTime).isAtMost(Instant.now().plus(PERIOD, ChronoUnit.SECONDS)); + Truth.assertThat(trigger.getFinalFireTime()).isNull(); + Truth.assertThat(trigger.getEndTime()).isNull(); + } +} diff --git a/priam/src/test/java/com/netflix/priam/services/TestSnapshotMetaService.java b/priam/src/test/java/com/netflix/priam/services/TestSnapshotMetaService.java deleted file mode 100644 index 5d40da666..000000000 --- a/priam/src/test/java/com/netflix/priam/services/TestSnapshotMetaService.java +++ /dev/null @@ -1,196 +0,0 @@ -/* - * Copyright 2018 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -package com.netflix.priam.services; - -import com.google.inject.Guice; -import com.google.inject.Injector; -import com.netflix.priam.config.IConfiguration; -import com.netflix.priam.backup.AbstractBackup; -import com.netflix.priam.backup.BRTestModule; -import com.netflix.priam.backupv2.*; -import com.netflix.priam.config.IBackupRestoreConfig; -import com.netflix.priam.scheduler.TaskTimer; -import com.netflix.priam.utils.DateUtil; -import org.apache.cassandra.io.sstable.Component; -import org.apache.commons.io.FileUtils; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.FileWriter; -import java.io.IOException; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.time.Instant; -import java.util.*; - -/** - * Created by aagrawal on 6/20/18. 
- */ -public class TestSnapshotMetaService { - private static final Logger logger = LoggerFactory.getLogger(TestSnapshotMetaService.class.getName()); - private static Path dummyDataDirectoryLocation; - private static IConfiguration configuration; - private static IBackupRestoreConfig backupRestoreConfig; - private static SnapshotMetaService snapshotMetaService; - private static TestMetaFileReader metaFileReader; - private static PrefixGenerator prefixGenerator; - - @Before - public void setUp() { - Injector injector = Guice.createInjector(new BRTestModule()); - - if (configuration == null) - configuration = injector.getInstance(IConfiguration.class); - - if (backupRestoreConfig == null) - backupRestoreConfig = injector.getInstance(IBackupRestoreConfig.class); - - if (snapshotMetaService == null) - snapshotMetaService = injector.getInstance(SnapshotMetaService.class); - - if (metaFileReader == null) - metaFileReader = new TestMetaFileReader(); - - if (prefixGenerator == null) - prefixGenerator = injector.getInstance(PrefixGenerator.class); - - dummyDataDirectoryLocation = Paths.get(configuration.getDataFileLocation()); - cleanupDir(dummyDataDirectoryLocation); - - } - - @Test - public void testSnapshotMetaServiceEnabled() throws Exception { - TaskTimer taskTimer = SnapshotMetaService.getTimer(backupRestoreConfig); - Assert.assertNotNull(taskTimer); - } - - @Test - public void testPrefix() throws Exception{ - Assert.assertTrue(prefixGenerator.getPrefix().endsWith("ppa-ekaf/1808575600")); - Assert.assertTrue(prefixGenerator.getMetaPrefix().endsWith("ppa-ekaf/1808575600/META")); - } - - @Test - public void testMetaFileName() throws Exception { - String fileName = MetaFileInfo.getMetaFileName(DateUtil.getInstant()); - Path path = Paths.get(dummyDataDirectoryLocation.toFile().getAbsolutePath(), fileName); - Assert.assertTrue(metaFileReader.isValidMetaFile(path)); - path = Paths.get(dummyDataDirectoryLocation.toFile().getAbsolutePath(), fileName + ".tmp"); - Assert.assertFalse(metaFileReader.isValidMetaFile(path)); - } - - private void test(int noOfSstables, int noOfKeyspaces, int noOfCf) throws Exception{ - Instant snapshotInstant = DateUtil.getInstant(); - String snapshotName = snapshotMetaService.generateSnapshotName(snapshotInstant); - generateDummyFiles(dummyDataDirectoryLocation, noOfKeyspaces, noOfCf, noOfSstables, AbstractBackup.SNAPSHOT_FOLDER, snapshotName); - snapshotMetaService.setSnapshotName(snapshotName); - Path metaFileLocation = snapshotMetaService.processSnapshot(snapshotInstant).getMetaFilePath(); - Assert.assertNotNull(metaFileLocation); - Assert.assertTrue(metaFileLocation.toFile().exists()); - Assert.assertTrue(metaFileLocation.toFile().isFile()); - - //Try reading meta file. 
- metaFileReader.setNoOfSstables(noOfSstables + 1); - metaFileReader.readMeta(metaFileLocation); - - MetaFileInfo metaFileInfo = metaFileReader.getMetaFileInfo(); - Assert.assertEquals(1, metaFileInfo.getVersion()); - Assert.assertEquals(configuration.getAppName(), metaFileInfo.getAppName()); - Assert.assertEquals(configuration.getRac(), metaFileInfo.getRack()); - Assert.assertEquals(configuration.getDC(), metaFileInfo.getRegion()); - - //Cleanup - metaFileLocation.toFile().delete(); - cleanupDir(dummyDataDirectoryLocation); - } - - @Test - public void testMetaFile() throws Exception { - test(5, 1,1); - } - - private void cleanupDir(Path dir){ - if (dir.toFile().exists()) - try { - FileUtils.cleanDirectory(dir.toFile()); - } catch (IOException e) { - e.printStackTrace(); - } - } - - @Test - public void testSize() throws Exception { - test (1000, 2,2); - } - - private void generateDummyFiles(Path dummyDir, int noOfKeyspaces, int noOfCf, int noOfSstables, String backupDir, String snapshotName) throws Exception { - if (dummyDir == null) - dummyDir = dummyDataDirectoryLocation; - - //Clean the dummy directory - if (dummyDir.toFile().exists()) - FileUtils.cleanDirectory(dummyDir.toFile()); - - Random random = new Random(); - - for (int i = 1; i <= noOfKeyspaces; i++) { - String keyspaceName = "sample" + i; - - for (int j = 1; j <= noOfCf; j++) { - String columnfamilyname = "cf" + j; - - for (int k = 1; k <= noOfSstables; k++) { - String prefixName = "mc-" + k + "-big"; - - for (Component.Type type : EnumSet.allOf(Component.Type.class)) { - Path componentPath = Paths.get(dummyDir.toFile().getAbsolutePath(), keyspaceName, columnfamilyname, backupDir, snapshotName, prefixName + "-" + type.name() + ".db"); - componentPath.getParent().toFile().mkdirs(); - try (FileWriter fileWriter = new FileWriter(componentPath.toFile())) { - fileWriter.write(""); - } - - } - } - - Path componentPath = Paths.get(dummyDir.toFile().getAbsolutePath(), keyspaceName, columnfamilyname, backupDir, snapshotName, "manifest.json"); - try(FileWriter fileWriter = new FileWriter(componentPath.toFile())){ - fileWriter.write(""); - } - } - } - } - - public static class TestMetaFileReader extends MetaFileReader { - - private int noOfSstables; - - public void setNoOfSstables(int noOfSstables) { - this.noOfSstables = noOfSstables; - } - - @Override - public void process(ColumnfamilyResult columnfamilyResult) { - Assert.assertEquals(noOfSstables, columnfamilyResult.getSstables().size()); - } - } - - -} diff --git a/priam/src/test/java/com/netflix/priam/stream/StreamingTest.java b/priam/src/test/java/com/netflix/priam/stream/StreamingTest.java index 7efa32ef9..c88953c2c 100644 --- a/priam/src/test/java/com/netflix/priam/stream/StreamingTest.java +++ b/priam/src/test/java/com/netflix/priam/stream/StreamingTest.java @@ -17,88 +17,116 @@ package com.netflix.priam.stream; -import java.io.IOException; - -import org.junit.Assert; - -import org.junit.Test; - import com.google.inject.Guice; import com.google.inject.Injector; -import com.netflix.priam.config.FakeConfiguration; -import com.netflix.priam.config.IConfiguration; -import com.netflix.priam.aws.S3BackupPath; +import com.netflix.priam.aws.RemoteBackupPath; import com.netflix.priam.backup.AbstractBackupPath; import com.netflix.priam.backup.BRTestModule; +import com.netflix.priam.config.IConfiguration; import com.netflix.priam.identity.InstanceIdentity; import com.netflix.priam.utils.FifoQueue; +import org.junit.Assert; +import org.junit.Test; -public class StreamingTest -{ - public 
void teststream() throws IOException, InterruptedException - { - IConfiguration config = new FakeConfiguration("test", "cass_upg107_ccs", "test", "ins_id"); - } +public class StreamingTest { @Test - public void testFifoAddAndRemove() - { - FifoQueue queue = new FifoQueue(10); - for (long i = 0; i < 100; i++) - queue.adjustAndAdd(i); + public void testFifoAddAndRemove() { + FifoQueue queue = new FifoQueue<>(10); + for (long i = 0; i < 100; i++) queue.adjustAndAdd(i); Assert.assertEquals(10, queue.size()); Assert.assertEquals(new Long(90), queue.first()); } @Test - public void testAbstractPath() - { + public void testAbstractPath() { Injector injector = Guice.createInjector(new BRTestModule()); IConfiguration conf = injector.getInstance(IConfiguration.class); InstanceIdentity factory = injector.getInstance(InstanceIdentity.class); - - FifoQueue queue = new FifoQueue(10); - for (int i = 10; i < 30; i++) - { - S3BackupPath path = new S3BackupPath(conf, factory); - path.parseRemote("test_backup/"+FakeConfiguration.FAKE_REGION+"/fakecluster/123456/201108" + i + "0000" + "/SNAP/ks1/cf2/f1" + i + ".db"); + String region = factory.getInstanceInfo().getRegion(); + + FifoQueue queue = new FifoQueue<>(10); + for (int i = 10; i < 30; i++) { + RemoteBackupPath path = new RemoteBackupPath(conf, factory); + path.parseRemote( + "test_backup/" + + region + + "/fakecluster/123456/201108" + + i + + "0000" + + "/SNAP/ks1/cf2/f1" + + i + + ".db"); queue.adjustAndAdd(path); } - for (int i = 10; i < 30; i++) - { - S3BackupPath path = new S3BackupPath(conf, factory); - path.parseRemote("test_backup/"+FakeConfiguration.FAKE_REGION+"/fakecluster/123456/201108" + i + "0000" + "/SNAP/ks1/cf2/f2" + i + ".db"); + for (int i = 10; i < 30; i++) { + RemoteBackupPath path = new RemoteBackupPath(conf, factory); + path.parseRemote( + "test_backup/" + + region + + "/fakecluster/123456/201108" + + i + + "0000" + + "/SNAP/ks1/cf2/f2" + + i + + ".db"); queue.adjustAndAdd(path); } - for (int i = 10; i < 30; i++) - { - S3BackupPath path = new S3BackupPath(conf, factory); - path.parseRemote("test_backup/"+FakeConfiguration.FAKE_REGION+"/fakecluster/123456/201108" + i + "0000" + "/SNAP/ks1/cf2/f3" + i + ".db"); + for (int i = 10; i < 30; i++) { + RemoteBackupPath path = new RemoteBackupPath(conf, factory); + path.parseRemote( + "test_backup/" + + region + + "/fakecluster/123456/201108" + + i + + "0000" + + "/SNAP/ks1/cf2/f3" + + i + + ".db"); queue.adjustAndAdd(path); } - S3BackupPath path = new S3BackupPath(conf, factory); - path.parseRemote("test_backup/"+FakeConfiguration.FAKE_REGION+"/fakecluster/123456/201108290000" + "/SNAP/ks1/cf2/f129.db"); + RemoteBackupPath path = new RemoteBackupPath(conf, factory); + path.parseRemote( + "test_backup/" + + region + + "/fakecluster/123456/201108290000" + + "/SNAP/ks1/cf2/f129.db"); Assert.assertTrue(queue.contains(path)); - path.parseRemote("test_backup/"+FakeConfiguration.FAKE_REGION+"/fakecluster/123456/201108290000" + "/SNAP/ks1/cf2/f229.db"); + path.parseRemote( + "test_backup/" + + region + + "/fakecluster/123456/201108290000" + + "/SNAP/ks1/cf2/f229.db"); Assert.assertTrue(queue.contains(path)); - path.parseRemote("test_backup/"+FakeConfiguration.FAKE_REGION+"/fakecluster/123456/201108290000" + "/SNAP/ks1/cf2/f329.db"); + path.parseRemote( + "test_backup/" + + region + + "/fakecluster/123456/201108290000" + + "/SNAP/ks1/cf2/f329.db"); Assert.assertTrue(queue.contains(path)); - 
path.parseRemote("test_backup/"+FakeConfiguration.FAKE_REGION+"/fakecluster/123456/201108260000/SNAP/ks1/cf2/f326.db To: cass/data/ks1/cf2/f326.db"); + path.parseRemote( + "test_backup/" + + region + + "/fakecluster/123456/201108260000/SNAP/ks1/cf2/f326.db To: cass/data/ks1/cf2/f326.db"); Assert.assertEquals(path, queue.first()); } @Test - public void testIgnoreIndexFiles() - { - String[] testInputs = new String[] { "User_Authentication_Audit.User_Authentication_Audit_appkey_idx-hc-93-Digest.sha1", - "User_Authentication_Audit.User_Authentication_Audit_appkey_idx-hc-93-Filter.db", "User_Authentication_Audit.User_Authentication_Audit_appkey_idx-hc-93-Data.db", - "User_Authentication_Audit.User_Authentication_Audit_appkey_idx-hc-93-Statistics.db", "CS_Agents.CS_Agents_supervisorEmpSk_idx-hc-1-Filter.db", - "CS_Agents.CS_Agents_supervisorEmpSk_idx-hc-1-Digest.sha1", "CS_Agents.CS_Agents_supervisorEmpSk_idx-hc-1-Statistics.db", "CS_Agents.CS_Agents_supervisorEmpSk_idx-hc-1-Data.db" }; - + public void testIgnoreIndexFiles() { + String[] testInputs = + new String[] { + "User_Authentication_Audit.User_Authentication_Audit_appkey_idx-hc-93-Digest.sha1", + "User_Authentication_Audit.User_Authentication_Audit_appkey_idx-hc-93-Filter.db", + "User_Authentication_Audit.User_Authentication_Audit_appkey_idx-hc-93-Data.db", + "User_Authentication_Audit.User_Authentication_Audit_appkey_idx-hc-93-Statistics.db", + "CS_Agents.CS_Agents_supervisorEmpSk_idx-hc-1-Filter.db", + "CS_Agents.CS_Agents_supervisorEmpSk_idx-hc-1-Digest.sha1", + "CS_Agents.CS_Agents_supervisorEmpSk_idx-hc-1-Statistics.db", + "CS_Agents.CS_Agents_supervisorEmpSk_idx-hc-1-Data.db" + }; } - } diff --git a/priam/src/test/java/com/netflix/priam/tuner/JVMOptionTunerTest.java b/priam/src/test/java/com/netflix/priam/tuner/JVMOptionTunerTest.java index 684be39ce..1942704f5 100644 --- a/priam/src/test/java/com/netflix/priam/tuner/JVMOptionTunerTest.java +++ b/priam/src/test/java/com/netflix/priam/tuner/JVMOptionTunerTest.java @@ -17,65 +17,67 @@ package com.netflix.priam.tuner; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + import com.netflix.priam.config.FakeConfiguration; import com.netflix.priam.config.IConfiguration; import com.netflix.priam.scheduler.UnsupportedTypeException; -import org.junit.Test; - import java.util.*; import java.util.stream.Collectors; +import org.junit.Test; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -/** - * Created by aagrawal on 8/29/17. - */ +/** Created by aagrawal on 8/29/17. */ public class JVMOptionTunerTest { private IConfiguration config; JVMOptionsTuner tuner; @Test - public void testCMS() throws Exception - { + public void testCMS() throws Exception { config = new GCConfiguration(GCType.CMS, null, null, null, null); List jvmOptionMap = getConfiguredJVMOptions(config); - //Validate that all CMS options should be uncommented. - long failedVerification = jvmOptionMap.stream().map(jvmOption -> { - GCType gcType = GCTuner.getGCType(jvmOption); - if (gcType != null && gcType != GCType.CMS) - { - return 1; - } - return 0; - }).filter(returncode -> (returncode != 0)).count(); - - if (failedVerification > 0) - throw new Exception ("Failed validation for CMS"); + // Validate that all CMS options should be uncommented. 
+ long failedVerification = + jvmOptionMap + .stream() + .map( + jvmOption -> { + GCType gcType = GCTuner.getGCType(jvmOption); + if (gcType != null && gcType != GCType.CMS) { + return 1; + } + return 0; + }) + .filter(returncode -> (returncode != 0)) + .count(); + + if (failedVerification > 0) throw new Exception("Failed validation for CMS"); } @Test - public void testG1GC() throws Exception - { + public void testG1GC() throws Exception { config = new GCConfiguration(GCType.G1GC, null, null, null, null); List jvmOptionMap = getConfiguredJVMOptions(config); - //Validate that all G1GC options should be uncommented. - long failedVerification = jvmOptionMap.stream().map(jvmOption -> { - GCType gcType = GCTuner.getGCType(jvmOption); - if (gcType != null && gcType != GCType.G1GC) - { - return 1; - } - return 0; - }).filter(returncode -> (returncode != 0)).count(); - - if (failedVerification > 0) - throw new Exception ("Failed validation for G1GC"); + // Validate that all G1GC options should be uncommented. + long failedVerification = + jvmOptionMap + .stream() + .map( + jvmOption -> { + GCType gcType = GCTuner.getGCType(jvmOption); + if (gcType != null && gcType != GCType.G1GC) { + return 1; + } + return 0; + }) + .filter(returncode -> (returncode != 0)) + .count(); + + if (failedVerification > 0) throw new Exception("Failed validation for G1GC"); } @Test - public void testCMSUpsert() throws Exception - { + public void testCMSUpsert() throws Exception { JVMOption option1 = new JVMOption("-Dsample"); JVMOption option2 = new JVMOption("-Dsample2", "10", false, false); JVMOption option3 = new JVMOption("-XX:NumberOfGCLogFiles", "20", false, false); @@ -84,24 +86,35 @@ public void testCMSUpsert() throws Exception JVMOption xmxOption = new JVMOption("-Xmx", "20G", false, true); JVMOption xmsOption = new JVMOption("-Xms", "20G", false, true); - StringBuffer buffer = new StringBuffer(option1.toJVMOptionString() + "," + option2.toJVMOptionString() + "," + option3.toJVMOptionString()); - config = new GCConfiguration(GCType.CMS, null, buffer.toString(), xmnOption.getValue(), xmxOption.getValue()); + StringBuffer buffer = + new StringBuffer( + option1.toJVMOptionString() + + "," + + option2.toJVMOptionString() + + "," + + option3.toJVMOptionString()); + config = + new GCConfiguration( + GCType.CMS, + null, + buffer.toString(), + xmnOption.getValue(), + xmxOption.getValue()); List jvmOptions = getConfiguredJVMOptions(config); - //Verify all the options do exist. + // Verify all the options do exist. assertTrue(jvmOptions.contains(option3)); assertTrue(jvmOptions.contains(option2)); assertTrue(jvmOptions.contains(option1)); - //Verify heap options exist with the value provided. + // Verify heap options exist with the value provided. 
assertTrue(jvmOptions.contains(xmnOption)); assertTrue(jvmOptions.contains(xmxOption)); assertTrue(jvmOptions.contains(xmsOption)); } @Test - public void testCMSExclude() throws Exception - { + public void testCMSExclude() throws Exception { JVMOption youngHeap = new JVMOption("-Xmn", "3G", false, true); JVMOption maxHeap = new JVMOption("-Xmx", "12G", false, true); @@ -109,11 +122,17 @@ public void testCMSExclude() throws Exception JVMOption option2 = new JVMOption("-XX:NumberOfGCLogFiles", "20", false, false); JVMOption option3 = new JVMOption("-XX:+UseG1GC", null, false, false); - StringBuffer buffer = new StringBuffer(option1.toJVMOptionString() + "," + option2.toJVMOptionString() + "," + option3.toJVMOptionString()); + StringBuffer buffer = + new StringBuffer( + option1.toJVMOptionString() + + "," + + option2.toJVMOptionString() + + "," + + option3.toJVMOptionString()); config = new GCConfiguration(GCType.CMS, buffer.toString(), null, "3G", "12G"); List jvmOptions = getConfiguredJVMOptions(config); - //Verify all the options do not exist. + // Verify all the options do not exist. assertFalse(jvmOptions.contains(option3)); assertFalse(jvmOptions.contains(option2)); assertFalse(jvmOptions.contains(option1)); @@ -124,28 +143,37 @@ public void testCMSExclude() throws Exception } @Test - public void testG1GCUpsertExclude() throws Exception - { + public void testG1GCUpsertExclude() throws Exception { JVMOption youngHeap = new JVMOption("-Xmn", "3G", true, true); JVMOption maxHeap = new JVMOption("-Xmx", "12G", false, true); JVMOption option1 = new JVMOption("-Dsample"); JVMOption option2 = new JVMOption("-Dsample2", "10", false, false); JVMOption option3 = new JVMOption("-XX:NumberOfGCLogFiles", "20", false, false); - StringBuffer upsert = new StringBuffer(option1.toJVMOptionString() + "," + option2.toJVMOptionString() + "," + option3.toJVMOptionString()); + StringBuffer upsert = + new StringBuffer( + option1.toJVMOptionString() + + "," + + option2.toJVMOptionString() + + "," + + option3.toJVMOptionString()); JVMOption option4 = new JVMOption("-XX:NumberOfGCLogFiles", null, false, false); JVMOption option5 = new JVMOption("-XX:+UseG1GC", null, false, false); - StringBuffer exclude = new StringBuffer(option4.toJVMOptionString() + "," + option5.toJVMOptionString()); + StringBuffer exclude = + new StringBuffer(option4.toJVMOptionString() + "," + option5.toJVMOptionString()); - config = new GCConfiguration(GCType.G1GC, exclude.toString(), upsert.toString(), "3G", "12G"); + config = + new GCConfiguration( + GCType.G1GC, exclude.toString(), upsert.toString(), "3G", "12G"); List jvmOptions = getConfiguredJVMOptions(config); // Verify upserts exist assertTrue(jvmOptions.contains(option1)); assertTrue(jvmOptions.contains(option2)); - // Verify exclude exist. This is to prove that if an element is in EXCLUDE, it will always be excluded. + // Verify exclude exist. This is to prove that if an element is in EXCLUDE, it will always + // be excluded. 
assertFalse(jvmOptions.contains(option3)); assertFalse(jvmOptions.contains(option4)); assertFalse(jvmOptions.contains(option5)); @@ -158,37 +186,39 @@ public void testG1GCUpsertExclude() throws Exception assertTrue(allJVMOptions.contains(youngHeap)); } - private List getConfiguredJVMOptions(IConfiguration config) throws Exception { return getConfiguredJVMOptions(config, true); } - private List getConfiguredJVMOptions(IConfiguration config, boolean filter) throws Exception{ + private List getConfiguredJVMOptions(IConfiguration config, boolean filter) + throws Exception { tuner = new JVMOptionsTuner(config); List configuredJVMOptions = tuner.updateJVMOptions(); if (filter) { - return configuredJVMOptions.stream() - .map(JVMOption::parse) - .filter(jvmOption -> (jvmOption != null)) - .filter(jvmOption -> !jvmOption.isCommented()) - .collect(Collectors.toList()); + return configuredJVMOptions + .stream() + .map(JVMOption::parse) + .filter(jvmOption -> (jvmOption != null)) + .filter(jvmOption -> !jvmOption.isCommented()) + .collect(Collectors.toList()); } else { - return configuredJVMOptions.stream() - .map(JVMOption::parse) - .collect(Collectors.toList()); + return configuredJVMOptions.stream().map(JVMOption::parse).collect(Collectors.toList()); } } - - private class GCConfiguration extends FakeConfiguration{ + private class GCConfiguration extends FakeConfiguration { private GCType gcType; private String configuredJVMExclude; private String configuredJVMUpsert; private String configuredHeapNewSize; private String configuredHeapSize; - GCConfiguration(GCType gcType, String configuredJVMExclude,String configuredJVMUpsert, String configuredHeapNewSize, String configuredHeapSize) - { + GCConfiguration( + GCType gcType, + String configuredJVMExclude, + String configuredJVMUpsert, + String configuredHeapNewSize, + String configuredHeapSize) { this.gcType = gcType; this.configuredJVMExclude = configuredJVMExclude; this.configuredJVMUpsert = configuredJVMUpsert; @@ -197,29 +227,28 @@ private class GCConfiguration extends FakeConfiguration{ } @Override - public GCType getGCType() throws UnsupportedTypeException{ + public GCType getGCType() throws UnsupportedTypeException { return gcType; } @Override - public Map getJVMExcludeSet(){ - return JVMOptionsTuner.parseJVMOptions(configuredJVMExclude); + public String getJVMExcludeSet() { + return configuredJVMExclude; } @Override - public String getHeapSize(){ + public String getHeapSize() { return configuredHeapSize; } @Override - public String getHeapNewSize(){ + public String getHeapNewSize() { return configuredHeapNewSize; } @Override - public Map getJVMUpsertSet(){ - return JVMOptionsTuner.parseJVMOptions(configuredJVMUpsert); + public String getJVMUpsertSet() { + return configuredJVMUpsert; } } - } diff --git a/priam/src/test/java/com/netflix/priam/tuner/StandardTunerTest.java b/priam/src/test/java/com/netflix/priam/tuner/StandardTunerTest.java index 1aec70d9e..b63cb9c4a 100644 --- a/priam/src/test/java/com/netflix/priam/tuner/StandardTunerTest.java +++ b/priam/src/test/java/com/netflix/priam/tuner/StandardTunerTest.java @@ -16,79 +16,230 @@ package com.netflix.priam.tuner; -import java.io.File; +import static org.junit.Assert.assertEquals; import com.google.common.io.Files; +import com.google.inject.Guice; +import com.google.inject.Injector; +import com.netflix.priam.backup.BRTestModule; +import com.netflix.priam.config.BackupRestoreConfig; import com.netflix.priam.config.FakeConfiguration; +import com.netflix.priam.config.IBackupRestoreConfig; import 
com.netflix.priam.config.IConfiguration; +import com.netflix.priam.identity.config.InstanceInfo; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileReader; +import java.nio.charset.Charset; +import java.nio.file.Paths; +import java.util.HashMap; +import java.util.Map; +import java.util.Properties; +import org.apache.commons.io.FileUtils; +import org.junit.After; +import org.junit.Assert; import org.junit.Before; import org.junit.Test; -import static org.junit.Assert.assertEquals; +import org.yaml.snakeyaml.DumperOptions; +import org.yaml.snakeyaml.Yaml; -public class StandardTunerTest -{ - /* note: these are, more or less, arbitrary paritioner class names. as long as the tests exercise the code, all is good */ +public class StandardTunerTest { + /* note: these are, more or less, arbitrary partitioner class names. as long as the tests exercise the code, all is good */ private static final String A_PARTITIONER = "com.netflix.priam.utils.NonexistentPartitioner"; private static final String RANDOM_PARTITIONER = "org.apache.cassandra.dht.RandomPartitioner"; private static final String MURMUR_PARTITIONER = "org.apache.cassandra.dht.Murmur3Partitioner"; private static final String BOP_PARTITIONER = "org.apache.cassandra.dht.ByteOrderedPartitioner"; - private StandardTuner tuner; + private final StandardTuner tuner; + private final InstanceInfo instanceInfo; + private final IBackupRestoreConfig backupRestoreConfig; + private final File target = new File("/tmp/priam_test.yaml"); + private IConfiguration config; - @Before - public void setup() - { - - IConfiguration config = new FakeConfiguration(); - tuner = new StandardTuner(config); + public StandardTunerTest() { + Injector injector = Guice.createInjector(new BRTestModule()); + this.tuner = injector.getInstance(StandardTuner.class); + this.instanceInfo = injector.getInstance(InstanceInfo.class); + this.backupRestoreConfig = injector.getInstance(BackupRestoreConfig.class); + this.config = injector.getInstance(IConfiguration.class); + File targetDir = new File(config.getYamlLocation()).getParentFile(); + if (!targetDir.exists()) targetDir.mkdirs(); } @Test - public void derivePartitioner_NullYamlEntry() - { + public void derivePartitioner_NullYamlEntry() { String partitioner = tuner.derivePartitioner(null, A_PARTITIONER); assertEquals(A_PARTITIONER, partitioner); } @Test - public void derivePartitioner_EmptyYamlEntry() - { + public void derivePartitioner_EmptyYamlEntry() { String partitioner = tuner.derivePartitioner("", A_PARTITIONER); assertEquals(A_PARTITIONER, partitioner); } @Test - public void derivePartitioner_RandomPartitioner() - { + public void derivePartitioner_RandomPartitioner() { String partitioner = tuner.derivePartitioner(RANDOM_PARTITIONER, RANDOM_PARTITIONER); assertEquals(RANDOM_PARTITIONER, partitioner); } @Test - public void derivePartitioner_MurmurPartitioner() - { + public void derivePartitioner_MurmurPartitioner() { String partitioner = tuner.derivePartitioner(MURMUR_PARTITIONER, MURMUR_PARTITIONER); assertEquals(MURMUR_PARTITIONER, partitioner); } @Test - public void derivePartitioner_BOPPartitionerInYaml() - { + public void derivePartitioner_BOPPartitionerInYaml() { String partitioner = tuner.derivePartitioner(BOP_PARTITIONER, MURMUR_PARTITIONER); assertEquals(BOP_PARTITIONER, partitioner); } @Test - public void derivePartitioner_BOPPartitionerInConfig() - { + public void derivePartitioner_BOPPartitionerInConfig() { String partitioner = tuner.derivePartitioner(RANDOM_PARTITIONER, BOP_PARTITIONER); 
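// Taken together, the six derivePartitioner cases pin down its precedence: a null or
// empty yaml entry falls back to the configured partitioner; ByteOrderedPartitioner on
// either side is preserved; otherwise the yaml entry wins. A hedged sketch of that rule
// (illustration only; the real logic lives in StandardTuner.derivePartitioner):
//     String derive(String yamlEntry, String configured) {
//         if (yamlEntry == null || yamlEntry.isEmpty()) return configured;
//         if (BOP_PARTITIONER.equals(yamlEntry) || BOP_PARTITIONER.equals(configured)) {
//             return BOP_PARTITIONER;
//         }
//         return yamlEntry;
//     }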
assertEquals(BOP_PARTITIONER, partitioner); } + @Before + @After + public void cleanup() { + FileUtils.deleteQuietly(target); + } + @Test public void dump() throws Exception { - String target = "/tmp/priam_test.yaml"; - Files.copy(new File("src/main/resources/incr-restore-cassandra.yaml"), new File("/tmp/priam_test.yaml")); - tuner.writeAllProperties(target, "your_host", "YourSeedProvider"); + Files.copy(new File("src/main/resources/incr-restore-cassandra.yaml"), target); + tuner.writeAllProperties(target.getAbsolutePath(), "your_host", "YourSeedProvider"); + } + + @Test + public void addExtraParams() throws Exception { + String cassParamName1 = "client_encryption_options.optional"; + String priamKeyName1 = "Priam.client_encryption.optional"; + String cassParamName2 = "client_encryption_options.keystore_password"; + String priamKeyName2 = "Priam.client_encryption.keystore_password"; + String cassParamName3 = "randomKey"; + String priamKeyName3 = "Priam.randomKey"; + String cassParamName4 = "randomGroup.randomKey"; + String priamKeyName4 = "Priam.randomGroup.randomKey"; + + String extraConfigParam = + String.format( + "%s=%s,%s=%s,%s=%s,%s=%s", + priamKeyName1, + cassParamName1, + priamKeyName2, + cassParamName2, + priamKeyName3, + cassParamName3, + priamKeyName4, + cassParamName4); + Map extraParamValues = new HashMap(); + extraParamValues.put(priamKeyName1, true); + extraParamValues.put(priamKeyName2, "test"); + extraParamValues.put(priamKeyName3, "randomKeyValue"); + extraParamValues.put(priamKeyName4, "randomGroupValue"); + + Map map = + applyFakeConfiguration(new TunerConfiguration(extraConfigParam, extraParamValues)); + Assert.assertEquals("your_host", map.get("listen_address")); + Assert.assertEquals("true", ((Map) map.get("client_encryption_options")).get("optional")); + Assert.assertEquals( + "test", ((Map) map.get("client_encryption_options")).get("keystore_password")); + Assert.assertEquals("randomKeyValue", map.get("randomKey")); + Assert.assertEquals("randomGroupValue", ((Map) map.get("randomGroup")).get("randomKey")); + } + + @Test + public void testDiskAccessMode() throws Exception { + String diskAccessMode = "test_mode"; + Map map = applyFakeConfiguration(new FakeConfiguration().setDiskAccessMode(diskAccessMode)); + Assert.assertEquals(diskAccessMode, map.get("disk_access_mode")); + } + + @Test + public void testRoleManagerOverride() throws Exception { + String roleManagerOverride = "org.apache.cassandra.auth.CustomRoleManager"; + Map map = + applyFakeConfiguration(new FakeConfiguration().setRoleManager(roleManagerOverride)); + Assert.assertEquals(roleManagerOverride, map.get("role_manager")); + } + + private Map applyFakeConfiguration(FakeConfiguration fakeConfiguration) throws Exception { + StandardTuner tuner = + new StandardTuner(fakeConfiguration, backupRestoreConfig, instanceInfo); + Files.copy(new File("src/main/resources/incr-restore-cassandra.yaml"), target); + tuner.writeAllProperties(target.getAbsolutePath(), "your_host", "YourSeedProvider"); + + // Read the tuned file and verify + DumperOptions options = new DumperOptions(); + options.setDefaultFlowStyle(DumperOptions.FlowStyle.BLOCK); + Yaml yaml = new Yaml(options); + return yaml.load(new FileInputStream(target)); + } + + private class TunerConfiguration extends FakeConfiguration { + String extraConfigParams; + Map extraParamValues; + + TunerConfiguration(String extraConfigParam, Map extraParamValues) { + this.extraConfigParams = extraConfigParam; + this.extraParamValues = extraParamValues; + } + + @Override + 
public String getCassYamlVal(String priamKey) { + return extraParamValues.getOrDefault(priamKey, "").toString(); + } + + @Override + public String getExtraConfigParams() { + return extraConfigParams; + } + } + + @Test + public void testPropertiesFiles() throws Exception { + FakeConfiguration fake = (FakeConfiguration) config; + File testRackDcFile = new File("src/test/resources/conf/cassandra-rackdc.properties"); + File testYamlFile = new File("src/main/resources/incr-restore-cassandra.yaml"); + String propertiesPath = new File(config.getYamlLocation()).getParentFile().getPath(); + File rackDcFile = + new File( + Paths.get(propertiesPath, "cassandra-rackdc.properties") + .normalize() + .toString()); + File configFile = + new File(Paths.get(propertiesPath, "properties_test.yaml").normalize().toString()); + System.out.println(testRackDcFile); + System.out.println(rackDcFile); + Files.copy(testRackDcFile, rackDcFile); + Files.copy(testYamlFile, configFile); + + try { + fake.fakeProperties.put( + "propertyOverrides.cassandra-rackdc", + "dc=${dc},rack=${rac},ec2_naming_scheme=legacy,dc_suffix=testsuffix"); + + tuner.writeAllProperties(configFile.getPath(), "your_host", "YourSeedProvider"); + Properties prop = new Properties(); + prop.load(new FileReader(rackDcFile)); + assertEquals("us-east-1", prop.getProperty("dc")); + assertEquals("my_zone", prop.getProperty("rack")); + assertEquals("legacy", prop.getProperty("ec2_naming_scheme")); + assertEquals("testsuffix", prop.getProperty("dc_suffix")); + + assertEquals(4, prop.stringPropertyNames().size()); + } finally { + fake.fakeProperties.clear(); + for (String line : Files.readLines(rackDcFile, Charset.defaultCharset())) { + System.out.println(line); + } + + Files.copy(testRackDcFile, rackDcFile); + Files.copy(testYamlFile, configFile); + } } } diff --git a/priam/src/test/java/com/netflix/priam/tuner/dse/DseConfigStub.java b/priam/src/test/java/com/netflix/priam/tuner/dse/DseConfigStub.java index dbc44e16b..ec008e2d2 100644 --- a/priam/src/test/java/com/netflix/priam/tuner/dse/DseConfigStub.java +++ b/priam/src/test/java/com/netflix/priam/tuner/dse/DseConfigStub.java @@ -1,4 +1,3 @@ -package com.netflix.priam.tuner.dse; /* * Copyright 2017 Netflix, Inc. * @@ -15,48 +14,44 @@ * limitations under the License. 
* */ +package com.netflix.priam.tuner.dse; import com.netflix.priam.config.FakeConfiguration; - import java.util.HashSet; import java.util.Set; -public class DseConfigStub implements IDseConfiguration -{ +public class DseConfigStub implements IDseConfiguration { boolean auditLogEnabled; - public String getDseYamlLocation() - { + public String getDseYamlLocation() { return new FakeConfiguration().getCassHome() + "/resources/dse/conf/dse.yaml"; } - public String getDseDelegatingSnitch() - { + public String getDseDelegatingSnitch() { return null; } - public NodeType getNodeType() - { + public NodeType getNodeType() { return null; } - public boolean isAuditLogEnabled() - { + public boolean isAuditLogEnabled() { return auditLogEnabled; } - public void setAuditLogEnabled(boolean b) - { + public void setAuditLogEnabled(boolean b) { auditLogEnabled = b; } - public String getAuditLogExemptKeyspaces() - { + public String getAuditLogExemptKeyspaces() { return "YourSwellKeyspace"; } - public Set getAuditLogCategories() - { - return new HashSet(){{ this.add(AuditLogCategory.ALL); }}; + public Set getAuditLogCategories() { + return new HashSet() { + { + this.add(AuditLogCategory.ALL); + } + }; } } diff --git a/priam/src/test/java/com/netflix/priam/tuner/dse/DseTunerTest.java b/priam/src/test/java/com/netflix/priam/tuner/dse/DseTunerTest.java index f7698878d..b65f88ae5 100644 --- a/priam/src/test/java/com/netflix/priam/tuner/dse/DseTunerTest.java +++ b/priam/src/test/java/com/netflix/priam/tuner/dse/DseTunerTest.java @@ -1,4 +1,3 @@ -package com.netflix.priam.tuner.dse; /* * Copyright 2017 Netflix, Inc. * @@ -15,22 +14,20 @@ * limitations under the License. * */ +package com.netflix.priam.tuner.dse; +import com.google.common.io.Files; +import com.netflix.priam.config.FakeConfiguration; +import com.netflix.priam.config.IConfiguration; import java.io.File; import java.io.FileReader; import java.io.IOException; import java.util.Properties; - -import com.google.common.io.Files; -import com.netflix.priam.config.FakeConfiguration; -import com.netflix.priam.config.IConfiguration; - import org.junit.Assert; import org.junit.Before; import org.junit.Test; -public class DseTunerTest -{ +public class DseTunerTest { IConfiguration config; DseConfigStub dseConfig; DseTuner dseTunerYaml; @@ -41,26 +38,21 @@ public class DseTunerTest File targetDseYamlFile; @Before - public void setup() throws IOException - { + public void setup() throws IOException { config = new FakeConfiguration(); dseConfig = new DseConfigStub(); auditLogTunerYaml = new AuditLogTunerYaml(dseConfig); auditLogTunerLog4j = new AuditLogTunerLog4J(config, dseConfig); - dseTunerYaml = new DseTuner(config, dseConfig, auditLogTunerYaml); - dseTunerLog4j = new DseTuner(config, dseConfig, auditLogTunerLog4j); File targetDir = new File(config.getCassHome() + "/conf"); - if(!targetDir.exists()) - targetDir.mkdirs(); + if (!targetDir.exists()) targetDir.mkdirs(); targetFile = new File(config.getCassHome() + AuditLogTunerLog4J.AUDIT_LOG_FILE); Files.copy(new File("src/test/resources/" + AuditLogTunerLog4J.AUDIT_LOG_FILE), targetFile); } @Test - public void auditLogProperties_Enabled() throws IOException - { + public void auditLogProperties_Enabled() throws IOException { dseConfig.setAuditLogEnabled(true); auditLogTunerLog4j.tuneAuditLog(); @@ -70,8 +62,7 @@ public void auditLogProperties_Enabled() throws IOException } @Test - public void auditLogProperties_Disabled() throws IOException - { + public void auditLogProperties_Disabled() throws IOException { 
dseConfig.setAuditLogEnabled(false); auditLogTunerLog4j.tuneAuditLog(); @@ -81,14 +72,13 @@ public void auditLogProperties_Disabled() throws IOException } /** - * This is different because we test the disabled step using the already used enabled file - * (not a clean copy over of the original props file from the resources dir), and vice versa + * This is different because we test the disabled step using the already used enabled file (not + * a clean copy over of the original props file from the resources dir), and vice versa * * @throws IOException when file is not found or permissions. */ @Test - public void auditLogProperties_ThereAndBackAgain() throws IOException - { + public void auditLogProperties_ThereAndBackAgain() throws IOException { auditLogProperties_Enabled(); auditLogProperties_Disabled(); auditLogProperties_Enabled(); @@ -114,31 +104,38 @@ public void auditLogProperties_ThereAndBackAgain() throws IOException @Test public void auditLogYamlProperties_Enabled() throws IOException { File targetDseDir = new File(config.getCassHome() + "/resources/dse/conf/"); - if(!targetDseDir.exists()) { + if (!targetDseDir.exists()) { targetDseDir.mkdirs(); } int index = dseConfig.getDseYamlLocation().lastIndexOf('/') + 1; - targetDseYamlFile = new File(targetDseDir + dseConfig.getDseYamlLocation().substring(index - 1)); - Files.copy(new File("src/test/resources/conf/" + dseConfig.getDseYamlLocation().substring(index)), targetDseYamlFile); - + targetDseYamlFile = + new File(targetDseDir + dseConfig.getDseYamlLocation().substring(index - 1)); + Files.copy( + new File( + "src/test/resources/conf/" + + dseConfig.getDseYamlLocation().substring(index)), + targetDseYamlFile); dseConfig.setAuditLogEnabled(true); auditLogTunerYaml.tuneAuditLog(); - } @Test public void auditLogYamlProperties_Disabled() throws IOException { File targetDseDir = new File(config.getCassHome() + "/resources/dse/conf/"); - if(!targetDseDir.exists()) { + if (!targetDseDir.exists()) { targetDseDir.mkdirs(); } int index = dseConfig.getDseYamlLocation().lastIndexOf('/') + 1; - targetDseYamlFile = new File(targetDseDir + dseConfig.getDseYamlLocation().substring(index - 1)); - Files.copy(new File("src/test/resources/conf/" + dseConfig.getDseYamlLocation().substring(index)), targetDseYamlFile); - + targetDseYamlFile = + new File(targetDseDir + dseConfig.getDseYamlLocation().substring(index - 1)); + Files.copy( + new File( + "src/test/resources/conf/" + + dseConfig.getDseYamlLocation().substring(index)), + targetDseYamlFile); dseConfig.setAuditLogEnabled(false); auditLogTunerYaml.tuneAuditLog(); diff --git a/priam/src/test/java/com/netflix/priam/utils/BackupFileUtils.java b/priam/src/test/java/com/netflix/priam/utils/BackupFileUtils.java new file mode 100644 index 000000000..24ae8be37 --- /dev/null +++ b/priam/src/test/java/com/netflix/priam/utils/BackupFileUtils.java @@ -0,0 +1,90 @@ +/* + * Copyright 2018 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.netflix.priam.utils; + +import java.io.FileWriter; +import java.io.IOException; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.EnumSet; +import org.apache.cassandra.io.sstable.Component; +import org.apache.commons.io.FileUtils; + +/** Created by aagrawal on 9/23/18. */ +public class BackupFileUtils { + public static void cleanupDir(Path dir) { + if (dir.toFile().exists()) + try { + FileUtils.cleanDirectory(dir.toFile()); + } catch (IOException e) { + e.printStackTrace(); + } + } + + public static void generateDummyFiles( + Path dummyDir, + int noOfKeyspaces, + int noOfCf, + int noOfSstables, + String backupDir, + String snapshotName, + boolean cleanup) + throws Exception { + // Clean the dummy directory + if (cleanup) cleanupDir(dummyDir); + + for (int i = 1; i <= noOfKeyspaces; i++) { + String keyspaceName = "sample" + i; + + for (int j = 1; j <= noOfCf; j++) { + String columnfamilyname = "cf" + j; + + for (int k = 1; k <= noOfSstables; k++) { + String prefixName = "mc-" + k + "-big"; + + for (Component.Type type : EnumSet.allOf(Component.Type.class)) { + Path componentPath = + Paths.get( + dummyDir.toFile().getAbsolutePath(), + keyspaceName, + columnfamilyname, + backupDir, + snapshotName, + prefixName + "-" + type.name() + ".db"); + componentPath.getParent().toFile().mkdirs(); + try (FileWriter fileWriter = new FileWriter(componentPath.toFile())) { + fileWriter.write(""); + } + } + } + + Path componentPath = + Paths.get( + dummyDir.toFile().getAbsolutePath(), + keyspaceName, + columnfamilyname, + backupDir, + snapshotName, + "manifest.json"); + try (FileWriter fileWriter = new FileWriter(componentPath.toFile())) { + fileWriter.write(""); + } + } + } + } +} diff --git a/priam/src/test/java/com/netflix/priam/utils/FakeSleeper.java b/priam/src/test/java/com/netflix/priam/utils/FakeSleeper.java index a0d2b460e..231a5066e 100644 --- a/priam/src/test/java/com/netflix/priam/utils/FakeSleeper.java +++ b/priam/src/test/java/com/netflix/priam/utils/FakeSleeper.java @@ -17,19 +17,14 @@ package com.netflix.priam.utils; -/** - * TODO: Replace with a mock object - */ -public class FakeSleeper implements Sleeper -{ +/** TODO: Replace with a mock object */ +public class FakeSleeper implements Sleeper { @Override - public void sleep(long waitTimeMs) throws InterruptedException - { + public void sleep(long waitTimeMs) throws InterruptedException { // no-op } - public void sleepQuietly(long waitTimeMs) - { - //no-op + public void sleepQuietly(long waitTimeMs) { + // no-op } -} \ No newline at end of file +} diff --git a/priam/src/test/java/com/netflix/priam/utils/Murmur3TokenManagerTest.java b/priam/src/test/java/com/netflix/priam/utils/Murmur3TokenManagerTest.java index cf8ab6a20..a5bcc0e3f 100644 --- a/priam/src/test/java/com/netflix/priam/utils/Murmur3TokenManagerTest.java +++ b/priam/src/test/java/com/netflix/priam/utils/Murmur3TokenManagerTest.java @@ -17,20 +17,19 @@ package com.netflix.priam.utils; +import static com.netflix.priam.utils.TokenManager.MAXIMUM_TOKEN_MURMUR3; +import static com.netflix.priam.utils.TokenManager.MINIMUM_TOKEN_MURMUR3; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; + import com.google.common.collect.ImmutableList; import com.netflix.priam.config.FakeConfiguration; import com.netflix.priam.config.IConfiguration; -import org.junit.Before; -import org.junit.Test; - import java.math.BigInteger; import java.util.Collections; import java.util.List; - -import static 
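generateDummyFiles above lays out a miniature Cassandra data tree: for each keyspace sampleN and column family cfM it writes one empty file per SSTable Component.Type (mc-K-big-DATA.db, mc-K-big-FILTER.db, and so on, since type.name() is used in the file name) plus a manifest.json, all under <keyspace>/<cf>/<backupDir>/<snapshotName>. A usage sketch with made-up arguments:

    import java.nio.file.Paths;

    // 2 keyspaces x 2 column families x 3 sstables; "snapshots" and "snap_v2"
    // are illustrative names for the backup dir and snapshot.
    BackupFileUtils.generateDummyFiles(
            Paths.get("/tmp/priam-dummy"), 2, 2, 3, "snapshots", "snap_v2", true);
    // yields e.g. /tmp/priam-dummy/sample1/cf1/snapshots/snap_v2/mc-1-big-DATA.db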
com.netflix.priam.utils.TokenManager.MAXIMUM_TOKEN_MURMUR3; -import static com.netflix.priam.utils.TokenManager.MINIMUM_TOKEN_MURMUR3; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; +import org.junit.Before; +import org.junit.Test; public class Murmur3TokenManagerTest { private TokenManager tokenManager; @@ -65,9 +64,14 @@ public void initialToken_positionZero() { @Test public void initialToken_offsets_zeroPosition() { - assertEquals(MINIMUM_TOKEN_MURMUR3.add(BigInteger.valueOf(7)), tokenManager.initialToken(1, 0, 7)); - assertEquals(MINIMUM_TOKEN_MURMUR3.add(BigInteger.valueOf(11)), tokenManager.initialToken(2, 0, 11)); - assertEquals(MINIMUM_TOKEN_MURMUR3.add(BigInteger.valueOf(Integer.MAX_VALUE)), + assertEquals( + MINIMUM_TOKEN_MURMUR3.add(BigInteger.valueOf(7)), + tokenManager.initialToken(1, 0, 7)); + assertEquals( + MINIMUM_TOKEN_MURMUR3.add(BigInteger.valueOf(11)), + tokenManager.initialToken(2, 0, 11)); + assertEquals( + MINIMUM_TOKEN_MURMUR3.add(BigInteger.valueOf(Integer.MAX_VALUE)), tokenManager.initialToken(256, 0, Integer.MAX_VALUE)); } @@ -76,17 +80,23 @@ public void initialToken_cannotExceedMaximumToken() { final int maxRingSize = Integer.MAX_VALUE; final int maxPosition = maxRingSize - 1; final int maxOffset = Integer.MAX_VALUE; - assertEquals(1, MAXIMUM_TOKEN_MURMUR3.compareTo(tokenManager.initialToken(maxRingSize, maxPosition, maxOffset))); + assertEquals( + 1, + MAXIMUM_TOKEN_MURMUR3.compareTo( + tokenManager.initialToken(maxRingSize, maxPosition, maxOffset))); } @Test public void createToken() { - assertEquals(MAXIMUM_TOKEN_MURMUR3.subtract(MINIMUM_TOKEN_MURMUR3).divide(BigInteger.valueOf(8 * 32)) + assertEquals( + MAXIMUM_TOKEN_MURMUR3 + .subtract(MINIMUM_TOKEN_MURMUR3) + .divide(BigInteger.valueOf(256)) .multiply(BigInteger.TEN) .add(BigInteger.valueOf(tokenManager.regionOffset("region"))) .add(MINIMUM_TOKEN_MURMUR3) .toString(), - tokenManager.createToken(10, 8, 32, "region")); + tokenManager.createToken(10, 256, "region")); } @Test(expected = IllegalArgumentException.class) @@ -97,33 +107,45 @@ public void findClosestToken_emptyTokenList() { @Test public void findClosestToken_singleTokenList() { final BigInteger onlyToken = BigInteger.valueOf(100); - assertEquals(onlyToken, tokenManager.findClosestToken(BigInteger.TEN, ImmutableList.of(onlyToken))); + assertEquals( + onlyToken, + tokenManager.findClosestToken(BigInteger.TEN, ImmutableList.of(onlyToken))); } @Test public void findClosestToken_multipleTokenList() { - List tokenList = ImmutableList.of(BigInteger.ONE, BigInteger.TEN, BigInteger.valueOf(100)); + List tokenList = + ImmutableList.of(BigInteger.ONE, BigInteger.TEN, BigInteger.valueOf(100)); assertEquals(BigInteger.ONE, tokenManager.findClosestToken(BigInteger.ONE, tokenList)); - assertEquals(BigInteger.TEN, tokenManager.findClosestToken(BigInteger.valueOf(9), tokenList)); + assertEquals( + BigInteger.TEN, tokenManager.findClosestToken(BigInteger.valueOf(9), tokenList)); assertEquals(BigInteger.TEN, tokenManager.findClosestToken(BigInteger.TEN, tokenList)); - assertEquals(BigInteger.TEN, tokenManager.findClosestToken(BigInteger.valueOf(12), tokenList)); - assertEquals(BigInteger.TEN, tokenManager.findClosestToken(BigInteger.valueOf(51), tokenList)); - assertEquals(BigInteger.valueOf(100), tokenManager.findClosestToken(BigInteger.valueOf(56), tokenList)); - assertEquals(BigInteger.valueOf(100), tokenManager.findClosestToken(BigInteger.valueOf(100), tokenList)); + assertEquals( + BigInteger.TEN, 
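Two things happen in the Murmur3 hunks around here. First, createToken lost a parameter: createToken(10, 8, 32, "region") became createToken(10, 256, "region"), the old racs * racSize pair (8 * 32) collapsing into a single ring size of 256, with the expected value moving from divide(8 * 32) to divide(256) to match. Second, the reflowed assertions still encode the same arithmetic: initialToken(splits, position, offset) is MINIMUM_TOKEN plus (MAXIMUM_TOKEN - MINIMUM_TOKEN) / splits * position plus offset. A BigInteger sketch of that formula (the real TokenManager also validates its arguments, as the expected-exception tests show):

    import java.math.BigInteger;

    // min/max are the partitioner's MINIMUM_TOKEN/MAXIMUM_TOKEN constants.
    static BigInteger initialToken(
            BigInteger min, BigInteger max, int splits, int position, long offset) {
        BigInteger slice = max.subtract(min).divide(BigInteger.valueOf(splits));
        return min.add(slice.multiply(BigInteger.valueOf(position)))
                .add(BigInteger.valueOf(offset));
    }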
tokenManager.findClosestToken(BigInteger.valueOf(12), tokenList)); + assertEquals( + BigInteger.TEN, tokenManager.findClosestToken(BigInteger.valueOf(51), tokenList)); + assertEquals( + BigInteger.valueOf(100), + tokenManager.findClosestToken(BigInteger.valueOf(56), tokenList)); + assertEquals( + BigInteger.valueOf(100), + tokenManager.findClosestToken(BigInteger.valueOf(100), tokenList)); } @Test public void findClosestToken_tieGoesToLargerToken() { - assertEquals(BigInteger.TEN, tokenManager.findClosestToken(BigInteger.valueOf(5), - ImmutableList.of(BigInteger.ZERO, BigInteger.TEN))); + assertEquals( + BigInteger.TEN, + tokenManager.findClosestToken( + BigInteger.valueOf(5), ImmutableList.of(BigInteger.ZERO, BigInteger.TEN))); } @Test public void test4Splits() { // example tokens from http://wiki.apache.org/cassandra/Operations - final String expectedTokens = "-9223372036854775808,-4611686018427387904," - + "0,4611686018427387904"; + final String expectedTokens = + "-9223372036854775808,-4611686018427387904," + "0,4611686018427387904"; String[] tokens = expectedTokens.split(","); int splits = tokens.length; for (int i = 0; i < splits; i++) @@ -132,14 +154,15 @@ public void test4Splits() { @Test public void test16Splits() { - final String expectedTokens = "-9223372036854775808,-8070450532247928832," - + "-6917529027641081856,-5764607523034234880," - + "-4611686018427387904,-3458764513820540928," - + "-2305843009213693952,-1152921504606846976," - + "0,1152921504606846976," - + "2305843009213693952,3458764513820540928," - + "4611686018427387904,5764607523034234880," - + "6917529027641081856,8070450532247928832"; + final String expectedTokens = + "-9223372036854775808,-8070450532247928832," + + "-6917529027641081856,-5764607523034234880," + + "-4611686018427387904,-3458764513820540928," + + "-2305843009213693952,-1152921504606846976," + + "0,1152921504606846976," + + "2305843009213693952,3458764513820540928," + + "4611686018427387904,5764607523034234880," + + "6917529027641081856,8070450532247928832"; String[] tokens = expectedTokens.split(","); int splits = tokens.length; for (int i = 0; i < splits; i++) @@ -152,10 +175,13 @@ public void regionOffset() { for (String region1 : allRegions.split(",")) for (String region2 : allRegions.split(",")) { - if (region1.equals(region2)) - continue; - assertFalse("Diffrence seems to be low", - Math.abs(tokenManager.regionOffset(region1) - tokenManager.regionOffset(region2)) < 100); + if (region1.equals(region2)) continue; + assertFalse( + "Diffrence seems to be low", + Math.abs( + tokenManager.regionOffset(region1) + - tokenManager.regionOffset(region2)) + < 100); } } diff --git a/priam/src/test/java/com/netflix/priam/utils/RandomTokenManagerTest.java b/priam/src/test/java/com/netflix/priam/utils/RandomTokenManagerTest.java index 46a6ac8e7..7bf791ede 100644 --- a/priam/src/test/java/com/netflix/priam/utils/RandomTokenManagerTest.java +++ b/priam/src/test/java/com/netflix/priam/utils/RandomTokenManagerTest.java @@ -17,64 +17,61 @@ package com.netflix.priam.utils; -import java.math.BigInteger; -import java.util.Collections; -import java.util.List; - -import org.junit.Test; -import org.junit.Before; - -import com.google.common.collect.ImmutableList; import static com.netflix.priam.utils.TokenManager.MAXIMUM_TOKEN_RANDOM; import static com.netflix.priam.utils.TokenManager.MINIMUM_TOKEN_RANDOM; -import com.netflix.priam.config.FakeConfiguration; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; -public class 
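The findClosestToken cases above pin down two rules: the nearest token wins (51 maps to 10 because |51 - 10| < |100 - 51|), and an exact midpoint goes to the larger token (5 between 0 and 10 yields 10). A minimal sketch consistent with those assertions, assuming a non-empty list (the real method throws IllegalArgumentException on an empty one):

    import java.math.BigInteger;
    import java.util.List;

    static BigInteger findClosestToken(BigInteger key, List<BigInteger> tokens) {
        BigInteger best = null;
        for (BigInteger t : tokens) {
            if (best == null) {
                best = t;
                continue;
            }
            int cmp = t.subtract(key).abs().compareTo(best.subtract(key).abs());
            if (cmp < 0 || (cmp == 0 && t.compareTo(best) > 0)) best = t; // tie: larger wins
        }
        return best;
    }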
RandomTokenManagerTest -{ +import com.google.common.collect.ImmutableList; +import com.netflix.priam.config.FakeConfiguration; +import java.math.BigInteger; +import java.util.Collections; +import java.util.List; +import org.junit.Before; +import org.junit.Test; + +public class RandomTokenManagerTest { private FakeConfiguration config; private TokenManager tokenManager; @Before - public void setUp() - { + public void setUp() { this.config = new FakeConfiguration(); this.tokenManager = new TokenManager(config); } @Test(expected = IllegalArgumentException.class) - public void initialToken_zeroSize() - { + public void initialToken_zeroSize() { tokenManager.initialToken(0, 0, 1); } @Test(expected = IllegalArgumentException.class) - public void initialToken_negativePosition() - { + public void initialToken_negativePosition() { tokenManager.initialToken(1, -1, 1); } @Test(expected = IllegalArgumentException.class) - public void initialToken_negativeOffset() - { + public void initialToken_negativeOffset() { tokenManager.initialToken(1, 0, -1); } @Test - public void initialToken_positionZero() - { + public void initialToken_positionZero() { assertEquals(MINIMUM_TOKEN_RANDOM, tokenManager.initialToken(1, 0, 0)); assertEquals(MINIMUM_TOKEN_RANDOM, tokenManager.initialToken(10, 0, 0)); assertEquals(MINIMUM_TOKEN_RANDOM, tokenManager.initialToken(133, 0, 0)); } @Test - public void initialToken_offsets_zeroPosition() - { - assertEquals(MINIMUM_TOKEN_RANDOM.add(BigInteger.valueOf(7)), tokenManager.initialToken(1, 0, 7)); - assertEquals(MINIMUM_TOKEN_RANDOM.add(BigInteger.valueOf(11)), tokenManager.initialToken(2, 0, 11)); - assertEquals(MINIMUM_TOKEN_RANDOM.add(BigInteger.valueOf(Integer.MAX_VALUE)), + public void initialToken_offsets_zeroPosition() { + assertEquals( + MINIMUM_TOKEN_RANDOM.add(BigInteger.valueOf(7)), + tokenManager.initialToken(1, 0, 7)); + assertEquals( + MINIMUM_TOKEN_RANDOM.add(BigInteger.valueOf(11)), + tokenManager.initialToken(2, 0, 11)); + assertEquals( + MINIMUM_TOKEN_RANDOM.add(BigInteger.valueOf(Integer.MAX_VALUE)), tokenManager.initialToken(256, 0, Integer.MAX_VALUE)); } @@ -83,59 +80,71 @@ public void initialToken_cannotExceedMaximumToken() { final int maxRingSize = Integer.MAX_VALUE; final int maxPosition = maxRingSize - 1; final int maxOffset = Integer.MAX_VALUE; - assertEquals(1, MAXIMUM_TOKEN_RANDOM.compareTo(tokenManager.initialToken(maxRingSize, maxPosition, maxOffset))); + assertEquals( + 1, + MAXIMUM_TOKEN_RANDOM.compareTo( + tokenManager.initialToken(maxRingSize, maxPosition, maxOffset))); } @Test - public void createToken() - { - assertEquals(MAXIMUM_TOKEN_RANDOM.divide(BigInteger.valueOf(8 * 32)) - .multiply(BigInteger.TEN) - .add(BigInteger.valueOf(tokenManager.regionOffset("region"))) - .toString(), - tokenManager.createToken(10, 8, 32, "region")); + public void createToken() { + assertEquals( + MAXIMUM_TOKEN_RANDOM + .divide(BigInteger.valueOf(256)) + .multiply(BigInteger.TEN) + .add(BigInteger.valueOf(tokenManager.regionOffset("region"))) + .toString(), + tokenManager.createToken(10, 256, "region")); } @Test(expected = IllegalArgumentException.class) - public void findClosestToken_emptyTokenList() - { + public void findClosestToken_emptyTokenList() { tokenManager.findClosestToken(BigInteger.ZERO, Collections.emptyList()); } @Test - public void findClosestToken_singleTokenList() - { + public void findClosestToken_singleTokenList() { final BigInteger onlyToken = BigInteger.valueOf(100); - assertEquals(onlyToken, tokenManager.findClosestToken(BigInteger.TEN, 
ImmutableList.of(onlyToken))); + assertEquals( + onlyToken, + tokenManager.findClosestToken(BigInteger.TEN, ImmutableList.of(onlyToken))); } @Test - public void findClosestToken_multipleTokenList() - { - List tokenList = ImmutableList.of(BigInteger.ONE, BigInteger.TEN, BigInteger.valueOf(100)); + public void findClosestToken_multipleTokenList() { + List tokenList = + ImmutableList.of(BigInteger.ONE, BigInteger.TEN, BigInteger.valueOf(100)); assertEquals(BigInteger.ONE, tokenManager.findClosestToken(BigInteger.ONE, tokenList)); - assertEquals(BigInteger.TEN, tokenManager.findClosestToken(BigInteger.valueOf(9), tokenList)); + assertEquals( + BigInteger.TEN, tokenManager.findClosestToken(BigInteger.valueOf(9), tokenList)); assertEquals(BigInteger.TEN, tokenManager.findClosestToken(BigInteger.TEN, tokenList)); - assertEquals(BigInteger.TEN, tokenManager.findClosestToken(BigInteger.valueOf(12), tokenList)); - assertEquals(BigInteger.TEN, tokenManager.findClosestToken(BigInteger.valueOf(51), tokenList)); - assertEquals(BigInteger.valueOf(100), tokenManager.findClosestToken(BigInteger.valueOf(56), tokenList)); - assertEquals(BigInteger.valueOf(100), tokenManager.findClosestToken(BigInteger.valueOf(100), tokenList)); + assertEquals( + BigInteger.TEN, tokenManager.findClosestToken(BigInteger.valueOf(12), tokenList)); + assertEquals( + BigInteger.TEN, tokenManager.findClosestToken(BigInteger.valueOf(51), tokenList)); + assertEquals( + BigInteger.valueOf(100), + tokenManager.findClosestToken(BigInteger.valueOf(56), tokenList)); + assertEquals( + BigInteger.valueOf(100), + tokenManager.findClosestToken(BigInteger.valueOf(100), tokenList)); } @Test - public void findClosestToken_tieGoesToLargerToken() - { - assertEquals(BigInteger.TEN, tokenManager.findClosestToken(BigInteger.valueOf(5), - ImmutableList.of(BigInteger.ZERO, BigInteger.TEN))); + public void findClosestToken_tieGoesToLargerToken() { + assertEquals( + BigInteger.TEN, + tokenManager.findClosestToken( + BigInteger.valueOf(5), ImmutableList.of(BigInteger.ZERO, BigInteger.TEN))); } @Test - public void test4Splits() - { + public void test4Splits() { // example tokens from http://wiki.apache.org/cassandra/Operations - final String expectedTokens = "0,42535295865117307932921825928971026432," - + "85070591730234615865843651857942052864,127605887595351923798765477786913079296"; + final String expectedTokens = + "0,42535295865117307932921825928971026432," + + "85070591730234615865843651857942052864,127605887595351923798765477786913079296"; String[] tokens = expectedTokens.split(","); int splits = tokens.length; for (int i = 0; i < splits; i++) @@ -143,16 +152,16 @@ public void test4Splits() } @Test - public void test16Splits() - { - final String expectedTokens = "0,10633823966279326983230456482242756608," - + "21267647932558653966460912964485513216,31901471898837980949691369446728269824," - + "42535295865117307932921825928971026432,53169119831396634916152282411213783040," - + "63802943797675961899382738893456539648,74436767763955288882613195375699296256," - + "85070591730234615865843651857942052864,95704415696513942849074108340184809472," - + "106338239662793269832304564822427566080,116972063629072596815535021304670322688," - + "127605887595351923798765477786913079296,138239711561631250781995934269155835904," - + "148873535527910577765226390751398592512,159507359494189904748456847233641349120"; + public void test16Splits() { + final String expectedTokens = + "0,10633823966279326983230456482242756608," + + 
"21267647932558653966460912964485513216,31901471898837980949691369446728269824," + + "42535295865117307932921825928971026432,53169119831396634916152282411213783040," + + "63802943797675961899382738893456539648,74436767763955288882613195375699296256," + + "85070591730234615865843651857942052864,95704415696513942849074108340184809472," + + "106338239662793269832304564822427566080,116972063629072596815535021304670322688," + + "127605887595351923798765477786913079296,138239711561631250781995934269155835904," + + "148873535527910577765226390751398592512,159507359494189904748456847233641349120"; String[] tokens = expectedTokens.split(","); int splits = tokens.length; for (int i = 0; i < splits; i++) @@ -160,23 +169,23 @@ public void test16Splits() } @Test - public void regionOffset() - { + public void regionOffset() { String allRegions = "us-west-2,us-east,us-west,eu-east,eu-west,ap-northeast,ap-southeast"; for (String region1 : allRegions.split(",")) - for (String region2 : allRegions.split(",")) - { - if (region1.equals(region2)) - continue; - assertFalse("Diffrence seems to be low", - Math.abs(tokenManager.regionOffset(region1) - tokenManager.regionOffset(region2)) < 100); + for (String region2 : allRegions.split(",")) { + if (region1.equals(region2)) continue; + assertFalse( + "Diffrence seems to be low", + Math.abs( + tokenManager.regionOffset(region1) + - tokenManager.regionOffset(region2)) + < 100); } } @Test - public void testMultiToken() - { + public void testMultiToken() { int h1 = tokenManager.regionOffset("vijay"); int h2 = tokenManager.regionOffset("vijay2"); BigInteger t1 = tokenManager.initialToken(100, 10, h1); diff --git a/priam/src/test/java/com/netflix/priam/utils/TestDateUtils.java b/priam/src/test/java/com/netflix/priam/utils/TestDateUtils.java new file mode 100644 index 000000000..15cc254b3 --- /dev/null +++ b/priam/src/test/java/com/netflix/priam/utils/TestDateUtils.java @@ -0,0 +1,103 @@ +/* + * Copyright 2018 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.netflix.priam.utils; + +import java.time.Duration; +import java.time.Instant; +import java.time.temporal.ChronoUnit; +import org.apache.commons.lang3.StringUtils; +import org.junit.Assert; +import org.junit.Test; + +/** Created by aagrawal on 11/29/18. */ +public class TestDateUtils { + + @Test + public void testDateRangeDefault() { + String input = "default"; + Instant now = DateUtil.getInstant(); + DateUtil.DateRange dateRange = new DateUtil.DateRange(input); + + // Start and end should be a day apart. 
+ Assert.assertEquals( + dateRange.getEndTime(), dateRange.getStartTime().plus(1, ChronoUnit.DAYS)); + if (Duration.between(dateRange.getEndTime(), now).getSeconds() > 5) + throw new AssertionError( + String.format( + "End date: %s and now: %s should be almost same", + dateRange.getEndTime(), now)); + } + + @Test + public void testDateRangeEmpty() { + Instant now = DateUtil.getInstant(); + DateUtil.DateRange dateRange = new DateUtil.DateRange(" "); + + // Start and end should be a day apart. + Assert.assertEquals( + dateRange.getEndTime(), dateRange.getStartTime().plus(1, ChronoUnit.DAYS)); + if (Duration.between(dateRange.getEndTime(), now).getSeconds() > 5) + throw new AssertionError( + String.format( + "End date: %s and now: %s should be almost same", + dateRange.getEndTime(), now)); + } + + @Test + public void testDateRangeValues() { + String start = "201801011201"; + String end = "201801051201"; + DateUtil.DateRange dateRange = new DateUtil.DateRange(start + "," + end); + Assert.assertEquals(Instant.ofEpochSecond(1514808060), dateRange.getStartTime()); + Assert.assertEquals(Instant.ofEpochSecond(1515153660), dateRange.getEndTime()); + + start = "20180101"; + end = "20180105"; + dateRange = new DateUtil.DateRange(start + "," + end); + Assert.assertEquals(Instant.ofEpochSecond(1514764800), dateRange.getStartTime()); + Assert.assertEquals(Instant.ofEpochSecond(1515110400), dateRange.getEndTime()); + } + + @Test + public void testDateRangeRandom() { + DateUtil.DateRange dateRange = new DateUtil.DateRange("some,random,values"); + Assert.assertEquals(null, dateRange.getStartTime()); + Assert.assertEquals(null, dateRange.getEndTime()); + } + + @Test + public void testDateRangeMatch() { + Instant dateStart = Instant.ofEpochMilli(1543632497000L); + Instant dateEnd = Instant.ofEpochMilli(1543819697000L); + DateUtil.DateRange dateRange = new DateUtil.DateRange(dateStart, dateEnd); + Assert.assertEquals("1543", dateRange.match()); + + dateRange = new DateUtil.DateRange(dateStart, null); + Assert.assertEquals(StringUtils.EMPTY, dateRange.match()); + } + + @Test + public void testFutureDateRangeValues() { + String start = "202801011201"; + String end = "202801051201"; + DateUtil.DateRange dateRange = new DateUtil.DateRange(start + "," + end); + Assert.assertEquals(Instant.ofEpochSecond(1830340860), dateRange.getStartTime()); + Assert.assertEquals(Instant.ofEpochSecond(1830686460), dateRange.getEndTime()); + Assert.assertEquals("1830", dateRange.match()); + } +} diff --git a/priam/src/test/java/com/netflix/priam/utils/TestGsonJsonSerializer.java b/priam/src/test/java/com/netflix/priam/utils/TestGsonJsonSerializer.java index 809d5b420..08ed259e2 100644 --- a/priam/src/test/java/com/netflix/priam/utils/TestGsonJsonSerializer.java +++ b/priam/src/test/java/com/netflix/priam/utils/TestGsonJsonSerializer.java @@ -17,29 +17,30 @@ package com.netflix.priam.utils; import com.netflix.priam.backup.BackupMetadata; +import com.netflix.priam.backup.BackupVersion; import com.netflix.priam.health.InstanceState; +import java.time.LocalDateTime; +import java.util.Calendar; import org.junit.Assert; import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.time.LocalDateTime; -import java.util.Calendar; - -/** - * Created by aagrawal on 10/12/17. - */ +/** Created by aagrawal on 10/12/17. 
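The expected epochs in testDateRangeValues fix the accepted layouts and the zone: 201801011201 parses as 2018-01-01T12:01Z (epoch second 1514808060) and 20180101 as midnight UTC (1514764800), while testDateRangeMatch shows match() returning the longest shared prefix of the range's millisecond timestamp strings ("1543"). A sketch of a parse consistent with those numbers (DateUtil's actual formatter handling may differ in details):

    import java.time.Instant;
    import java.time.LocalDate;
    import java.time.LocalDateTime;
    import java.time.ZoneOffset;
    import java.time.format.DateTimeFormatter;

    static Instant parse(String s) {
        String v = s.trim();
        if (v.length() == 12) { // yyyyMMddHHmm
            return LocalDateTime
                    .parse(v, DateTimeFormatter.ofPattern("yyyyMMddHHmm"))
                    .toInstant(ZoneOffset.UTC);
        }
        return LocalDate.parse(v, DateTimeFormatter.BASIC_ISO_DATE) // yyyyMMdd
                .atStartOfDay(ZoneOffset.UTC).toInstant();
    }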
*/ public class TestGsonJsonSerializer { private static final Logger LOG = LoggerFactory.getLogger(TestGsonJsonSerializer.class); @Test public void testBackupMetaData() throws Exception { - BackupMetadata metadata = new BackupMetadata("123", Calendar.getInstance().getTime()); + BackupMetadata metadata = + new BackupMetadata( + BackupVersion.SNAPSHOT_BACKUP, "123", Calendar.getInstance().getTime()); String json = metadata.toString(); LOG.info(json); - //Deserialize it. - BackupMetadata metadata1 = GsonJsonSerializer.getGson().fromJson(json, BackupMetadata.class); + // Deserialize it. + BackupMetadata metadata1 = + GsonJsonSerializer.getGson().fromJson(json, BackupMetadata.class); LOG.info(metadata1.toString()); Assert.assertEquals(metadata.getSnapshotDate(), metadata1.getSnapshotDate()); Assert.assertEquals(metadata.getToken(), metadata1.getToken()); @@ -53,10 +54,13 @@ public void testRestoreStatus() throws Exception { restoreStatus.setExecutionStartTime(LocalDateTime.now().withSecond(0).withNano(0)); LOG.info(restoreStatus.toString()); - InstanceState.RestoreStatus restoreStatus1 = GsonJsonSerializer.getGson().fromJson(restoreStatus.toString(), InstanceState.RestoreStatus.class); + InstanceState.RestoreStatus restoreStatus1 = + GsonJsonSerializer.getGson() + .fromJson(restoreStatus.toString(), InstanceState.RestoreStatus.class); LOG.info(restoreStatus1.toString()); - Assert.assertEquals(restoreStatus.getExecutionStartTime(), restoreStatus1.getExecutionStartTime()); + Assert.assertEquals( + restoreStatus.getExecutionStartTime(), restoreStatus1.getExecutionStartTime()); Assert.assertEquals(restoreStatus.getStartDateRange(), restoreStatus1.getStartDateRange()); Assert.assertEquals(restoreStatus.getEndDateRange(), restoreStatus1.getEndDateRange()); } diff --git a/priam/src/main/java/com/netflix/priam/aws/S3FileSystemMBean.java b/priam/src/test/java/com/netflix/priam/utils/TestSystemUtils.java similarity index 57% rename from priam/src/main/java/com/netflix/priam/aws/S3FileSystemMBean.java rename to priam/src/test/java/com/netflix/priam/utils/TestSystemUtils.java index 5466d6a39..60a596c0f 100644 --- a/priam/src/main/java/com/netflix/priam/aws/S3FileSystemMBean.java +++ b/priam/src/test/java/com/netflix/priam/utils/TestSystemUtils.java @@ -1,5 +1,5 @@ /* - * Copyright 2013 Netflix, Inc. + * Copyright 2018 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,18 +14,19 @@ * limitations under the License. * */ -package com.netflix.priam.aws; -public interface S3FileSystemMBean { - String MBEAN_NAME = "com.priam.aws.S3FileSystemMBean:name=S3FileSystemMBean"; +package com.netflix.priam.utils; - long downloadCount(); +import org.junit.Assert; +import org.junit.Test; - long uploadCount(); +/** Created by aagrawal on 12/1/18. 
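Both tests above rely on the same round trip: toString() emits JSON through GsonJsonSerializer, and fromJson must rebuild an object whose fields compare equal. The shape of that check, sketched with a plain Gson instance (GsonJsonSerializer presumably applies its own configuration; "original" stands for any pre-built BackupMetadata):

    import com.google.gson.Gson;
    import org.junit.Assert;

    Gson gson = new Gson();
    String json = gson.toJson(original);
    BackupMetadata copy = gson.fromJson(json, BackupMetadata.class);
    Assert.assertEquals(original.getToken(), copy.getToken());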
*/ +public class TestSystemUtils { - int getActivecount(); - - long bytesUploaded(); - - long bytesDownloaded(); + @Test + public void testGetDataFromUrl() { + String dummyurl = "https://jsonplaceholder.typicode.com/todos/1"; + String response = SystemUtils.getDataFromUrl(dummyurl); + Assert.assertNotNull(response); + } } diff --git a/priam/src/test/resources/conf/Priam.properties b/priam/src/test/resources/conf/Priam.properties index 324b5ec99..ec6bd81bc 100644 --- a/priam/src/test/resources/conf/Priam.properties +++ b/priam/src/test/resources/conf/Priam.properties @@ -8,6 +8,5 @@ Priam.restore.threads=5 Priam.zones.available="us-east-1a,us-east1c" Priam.backup.incremental.enable=true Priam.backup.commitlog.enable=true -Priam.thrift.port=7102 Priam.backup.commitlog.location="/tmp/commitlog" Priam.snapshot.meta.cron="0 0/2 * 1/1 * ? *" \ No newline at end of file diff --git a/priam/src/test/resources/conf/cassandra-rackdc.properties b/priam/src/test/resources/conf/cassandra-rackdc.properties new file mode 100644 index 000000000..26b9b9de5 --- /dev/null +++ b/priam/src/test/resources/conf/cassandra-rackdc.properties @@ -0,0 +1,39 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# These properties are used with GossipingPropertyFileSnitch and will +# indicate the rack and dc for this node +dc=dc1 +rack=rack1 + +# Add a suffix to a datacenter name. Used by the Ec2Snitch and Ec2MultiRegionSnitch +# to append a string to the EC2 region name. +#dc_suffix= + +# Uncomment the following line to make this snitch prefer the internal ip when possible, as the Ec2MultiRegionSnitch does. +# prefer_local=true + +# Datacenter and rack naming convention used by the Ec2Snitch and Ec2MultiRegionSnitch. +# Options are: +# legacy : datacenter name is the part of the availability zone name preceding the last "-" +# when the zone ends in -1 and includes the number if not -1. Rack is the portion of +# the availability zone name following the last "-". +# Examples: us-west-1a => dc: us-west, rack: 1a; us-west-2b => dc: us-west-2, rack: 2b; +# YOU MUST USE THIS VALUE IF YOU ARE UPGRADING A PRE-4.0 CLUSTER +# standard : Default value. datacenter name is the standard AWS region name, including the number. +# rack name is the region plus the availability zone letter. 
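Note that testGetDataFromUrl above calls a live third-party endpoint (jsonplaceholder.typicode.com), so it requires outbound network access and will fail offline. SystemUtils.getDataFromUrl itself is not part of this diff; a helper of that shape might look like the following sketch (not Priam's implementation):

    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.util.Scanner;

    static String getDataFromUrl(String url) throws Exception {
        HttpURLConnection conn = (HttpURLConnection) new URL(url).openConnection();
        try (Scanner sc = new Scanner(conn.getInputStream(), "UTF-8")) {
            sc.useDelimiter("\\A"); // read the whole body as one token
            return sc.hasNext() ? sc.next() : "";
        } finally {
            conn.disconnect();
        }
    }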
+# Examples: us-west-1a => dc: us-west-1, rack: us-west-1a; us-west-2b => dc: us-west-2, rack: us-west-2b; +# ec2_naming_scheme=standard diff --git a/priam/src/test/resources/conf/jvm.options b/priam/src/test/resources/conf/jvm-server.options similarity index 97% rename from priam/src/test/resources/conf/jvm.options rename to priam/src/test/resources/conf/jvm-server.options index f91466ad0..03378b1d3 100644 --- a/priam/src/test/resources/conf/jvm.options +++ b/priam/src/test/resources/conf/jvm-server.options @@ -53,18 +53,12 @@ # before joining the ring. #-Dcassandra.ring_delay_ms=ms -# Set the port for the Thrift RPC service, which is used for client connections. (Default: 9160) -#-Dcassandra.rpc_port=port - # Set the SSL port for encrypted communication. (Default: 7001) #-Dcassandra.ssl_storage_port=port # Enable or disable the native transport server. See start_native_transport in cassandra.yaml. # cassandra.start_native_transport=true|false -# Enable or disable the Thrift RPC server. (Default: true) -#-Dcassandra.start_rpc=true/false - # Set the port for inter-node communication. (Default: 7000) #-Dcassandra.storage_port=port diff --git a/priam/src/test/resources/gossipInfoSample_1.txt b/priam/src/test/resources/gossipInfoSample_1.txt new file mode 100644 index 000000000..bc8a476dc --- /dev/null +++ b/priam/src/test/resources/gossipInfoSample_1.txt @@ -0,0 +1,105 @@ +/127.0.0.1 + generation:1550517654 + heartbeat:1565153 + STATUS:388:NORMAL,56713727820156410577229101240436610841 + LOAD:1564991:370740.0 + SCHEMA:577:b3839e65-1930-34f5-bb71-5f35ac844191 + DC:9:us-east + RACK:11:1d + RELEASE_VERSION:5:2.1.19.6 + INTERNAL_IP:7:127.0.0.1 + RPC_ADDRESS:4:127.0.0.1 + SEVERITY:1565152:0.0 + NET_VERSION:2:8 + HOST_ID:3:52661ae2-43e8-4183-ae3a-09e6a752ba8a + TOKENS:387: +/127.0.0.2 + generation:1544121750 + heartbeat:1565153 + STATUS:21:NORMAL,28356863910078205288614550621122593220 + LOAD:20962586:1.81589793594E11 + SCHEMA:19398066:b3839e65-1930-34f5-bb71-5f35ac844191 + DC:8:us-east + RACK:10:1c + RELEASE_VERSION:4:2.1.19.5 + INTERNAL_IP:6:127.0.0.2 + RPC_ADDRESS:3:127.0.0.2 + SEVERITY:20962630:0.0 + NET_VERSION:1:8 + HOST_ID:2:5e138f0e-8123-4ef7-9260-c19e48666a99 + TOKENS:20: +/127.0.0.3 + generation:1548877968 + heartbeat:1565153 + STATUS:18:NORMAL,113427455640312821154458202479064646083 + LOAD:6537819:1.81428734494E11 + SCHEMA:4973397:b3839e65-1930-34f5-bb71-5f35ac844191 + DC:8:us-east + RACK:10:1c + RELEASE_VERSION:4:2.1.19.5 + INTERNAL_IP:6:127.0.0.3 + RPC_ADDRESS:3:127.0.0.3 + SEVERITY:6537948:0.0 + NET_VERSION:1:8 + HOST_ID:2:7e5d32b9-5298-42a4-9091-b4d65ca43596 + TOKENS:17: +/127.0.0.4 + generation:1544122124 + heartbeat:1565153 + STATUS:18:NORMAL,1808575600 + LOAD:20961630:1.81437564192E11 + SCHEMA:19397072:b3839e65-1930-34f5-bb71-5f35ac844191 + DC:8:us-east + RACK:10:1e + RELEASE_VERSION:4:2.1.19.5 + INTERNAL_IP:6:127.0.0.4 + RPC_ADDRESS:3:127.0.0.4 + SEVERITY:20961636:16.88311767578125 + NET_VERSION:1:8 + HOST_ID:2:4e4d6165-4c7a-455b-9bc2-bfb7a118417f + TOKENS:17: +/127.0.0.5 + generation:1548451833 + heartbeat:1565153 + STATUS:16:NORMAL,141784319550391026443072753098378663704 + LOAD:7830271:1.80978101023E11 + SCHEMA:6265686:b3839e65-1930-34f5-bb71-5f35ac844191 + DC:8:us-east + RACK:10:1d + RELEASE_VERSION:4:2.1.19.5 + INTERNAL_IP:6:127.0.0.5 + RPC_ADDRESS:3:127.0.0.5 + SEVERITY:7830444:0.0 + NET_VERSION:1:8 + HOST_ID:2:56a88676-37cb-4d87-b30c-be8f75ff3773 + TOKENS:15: +/127.0.0.6 + generation:1547873811 + heartbeat:1565153 + STATUS:16:NORMAL,85070591730234615865843651859750628462 + 
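The cassandra-rackdc.properties comments above make the "standard" EC2 naming rule concrete: the datacenter is the full region name and the rack is the region plus the availability-zone letter. As a tiny illustrative helper (not Cassandra's snitch code):

    // "standard" scheme: us-west-2b -> dc "us-west-2", rack "us-west-2b".
    static String[] dcAndRack(String availabilityZone) {
        String dc = availabilityZone.substring(0, availabilityZone.length() - 1);
        return new String[] {dc, availabilityZone};
    }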
LOAD:9583414:1.81759352129E11 + SCHEMA:8018940:b3839e65-1930-34f5-bb71-5f35ac844191 + DC:8:us-east + RACK:10:1e + RELEASE_VERSION:4:2.1.19.5 + INTERNAL_IP:6:127.0.0.6 + RPC_ADDRESS:3:127.0.0.6 + SEVERITY:9583509:0.0 + NET_VERSION:1:8 + HOST_ID:2:a8b6c167-2c6b-44be-94e6-c41fa53c7515 + TOKENS:15: +/127.0.0.8 + generation:1551117498 + heartbeat:1565153 + STATUS:279:BOOT_REPLACE,127.0.0.7 + LOAD:989:7.26418875E8 + SCHEMA:100:b3839e65-1930-34f5-bb71-5f35ac844191 + DC:9:us-east + RACK:11:1d + RELEASE_VERSION:5:2.1.19.6 + INTERNAL_IP:7:100.0.0.0 + RPC_ADDRESS:4:100.0.0.0 + SEVERITY:1117:0.0 + NET_VERSION:1:8 + HOST_ID:3:ff004440-b678-4c45-a642-dba905903b84 + TOKENS:278: \ No newline at end of file diff --git a/priam/src/test/resources/incr-restore-cassandra.yaml b/priam/src/test/resources/incr-restore-cassandra.yaml index 9594672e2..39a3c41aa 100755 --- a/priam/src/test/resources/incr-restore-cassandra.yaml +++ b/priam/src/test/resources/incr-restore-cassandra.yaml @@ -317,65 +317,6 @@ native_transport_port: 9042 # native_transport_min_threads: 16 # native_transport_max_threads: 128 - -# Whether to start the thrift rpc server. -start_rpc: true -# The address to bind the Thrift RPC service to -- clients connect -# here. Unlike ListenAddress above, you *can* specify 0.0.0.0 here if -# you want Thrift to listen on all interfaces. -# -# Leaving this blank has the same effect it does for ListenAddress, -# (i.e. it will be based on the configured hostname of the node). -rpc_address: localhost -# port for Thrift to listen for clients on -rpc_port: 9160 - -# enable or disable keepalive on rpc connections -rpc_keepalive: true - -# Cassandra provides three out-of-the-box options for the RPC Server: -# -# sync -> One thread per thrift connection. For a very large number of clients, memory -# will be your limiting factor. On a 64 bit JVM, 128KB is the minimum stack size -# per thread, and that will correspond to your use of virtual memory (but physical memory -# may be limited depending on use of stack space). -# -# hsha -> Stands for "half synchronous, half asynchronous." All thrift clients are handled -# asynchronously using a small number of threads that does not vary with the amount -# of thrift clients (and thus scales well to many clients). The rpc requests are still -# synchronous (one thread per active request). -# -# The default is sync because on Windows hsha is about 30% slower. On Linux, -# sync/hsha performance is about the same, with hsha of course using less memory. -# -# Alternatively, can provide your own RPC server by providing the fully-qualified class name -# of an o.a.c.t.TServerFactory that can create an instance of it. -rpc_server_type: sync - -# Uncomment rpc_min|max_thread to set request pool size limits. -# -# Regardless of your choice of RPC server (see above), the number of maximum requests in the -# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync -# RPC server, it also dictates the number of clients that can be connected at all). -# -# The default is unlimited and thus provide no protection against clients overwhelming the server. You are -# encouraged to set a maximum that makes sense for you in production, but do keep in mind that -# rpc_max_threads represents the maximum number of client requests this server may execute concurrently. 
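gossipInfoSample_1.txt above mirrors the layout of nodetool gossipinfo: a /ip line opens each endpoint, followed by KEY:version:value application states, with STATUS carrying the node state and token (NORMAL,... for settled nodes, BOOT_REPLACE,127.0.0.7 for the replacing node). A sketch of a parser for that shape, should a test want the fixture as a map (not necessarily how Priam consumes it):

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    static Map<String, Map<String, String>> parseGossip(List<String> lines) {
        Map<String, Map<String, String>> byEndpoint = new HashMap<>();
        Map<String, String> current = null;
        for (String line : lines) {
            if (line.startsWith("/")) { // new endpoint block
                current = new HashMap<>();
                byEndpoint.put(line.trim(), current);
            } else if (current != null) {
                String[] p = line.trim().split(":", 3); // KEY:version:value
                if (p.length == 3) current.put(p[0], p[2]);
            }
        }
        return byEndpoint;
    }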
-# -# rpc_min_threads: 16 -# rpc_max_threads: 2048 - -# uncomment to set socket buffer sizes on rpc connections -# rpc_send_buff_size_in_bytes: -# rpc_recv_buff_size_in_bytes: - -# Frame size for thrift (maximum field length). -thrift_framed_transport_size_in_mb: 15 - -# The max length of a thrift message, including all fields and -# internal thrift overhead. -thrift_max_message_length_in_mb: 16 - # Set to true to have Cassandra create a hard link to each sstable # flushed or streamed locally in a backups/ subdirectory of the # Keyspace data. Removing these links is the operator's @@ -470,13 +411,6 @@ rpc_timeout_in_ms: 10000 # and the times are synchronized between the nodes. cross_node_timeout: false -# Enable socket timeout for streaming operation. -# When a timeout occurs during streaming, streaming is retried from the start -# of the current file. This *can* involve re-streaming an important amount of -# data, so you should avoid setting the value too low. -# Default value is 0, which never timeout streams. -# streaming_socket_timeout_in_ms: 0 - # phi value that must be reached for a host to be marked down. # most users should never need to adjust this. # phi_convict_threshold: 8 @@ -547,58 +481,6 @@ dynamic_snitch_reset_interval_in_ms: 600000 # until the pinned host was 20% worse than the fastest. dynamic_snitch_badness_threshold: 0.1 -# request_scheduler -- Set this to a class that implements -# RequestScheduler, which will schedule incoming client requests -# according to the specific policy. This is useful for multi-tenancy -# with a single Cassandra cluster. -# NOTE: This is specifically for requests from the client and does -# not affect inter node communication. -# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place -# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of -# client requests to a node with a separate queue for each -# request_scheduler_id. The scheduler is further customized by -# request_scheduler_options as described below. -request_scheduler: org.apache.cassandra.scheduler.NoScheduler - -# Scheduler Options vary based on the type of scheduler -# NoScheduler - Has no options -# RoundRobin -# - throttle_limit -- The throttle_limit is the number of in-flight -# requests per client. Requests beyond -# that limit are queued up until -# running requests can complete. -# The value of 80 here is twice the number of -# concurrent_reads + concurrent_writes. -# - default_weight -- default_weight is optional and allows for -# overriding the default which is 1. -# - weights -- Weights are optional and will default to 1 or the -# overridden default_weight. The weight translates into how -# many requests are handled during each turn of the -# RoundRobin, based on the scheduler id. -# -# request_scheduler_options: -# throttle_limit: 80 -# default_weight: 5 -# weights: -# Keyspace1: 1 -# Keyspace2: 5 - -# request_scheduler_id -- An identifer based on which to perform -# the request scheduling. Currently the only valid option is keyspace. -# request_scheduler_id: keyspace - -# index_interval controls the sampling of entries from the primrary -# row index in terms of space versus time. The larger the interval, -# the smaller and less effective the sampling will be. In technicial -# terms, the interval coresponds to the number of index entries that -# are skipped between taking each sample. All the sampled entries -# must fit in memory. 
Generally, a value between 128 and 512 here -# coupled with a large key cache size on CFs results in the best trade -# offs. This value is not often changed, however if you have many -# very small rows (many to an OS page), then increasing this will -# often lower memory usage without a impact on performance. -index_interval: 128 - # Enable or disable inter-node encryption # Default settings are TLS v1, RSA 1024-bit keys (it is imperative that # users generate their own keys) TLS_RSA_WITH_AES_128_CBC_SHA as the cipher diff --git a/settings.gradle b/settings.gradle index b772ee1bd..dfe759d98 100644 --- a/settings.gradle +++ b/settings.gradle @@ -1 +1,2 @@ +rootProject.name = 'Priam' include 'priam','priam-web','priam-cass-extensions','priam-dse-extensions'
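Taken together with the Priam.properties and jvm-server.options changes, the cassandra.yaml hunks above retire the last Thrift-era settings from the test fixtures (start_rpc, rpc_address, rpc_port, the rpc_server_type and request scheduler knobs, the thrift_* size limits). A quick way to confirm a fixture no longer carries them, sketched on the assumption that SnakeYAML is available on the test classpath:

    import java.io.FileInputStream;
    import java.io.InputStream;
    import java.util.Map;
    import org.junit.Assert;
    import org.yaml.snakeyaml.Yaml;

    try (InputStream in =
            new FileInputStream("priam/src/test/resources/incr-restore-cassandra.yaml")) {
        Map<?, ?> yaml = (Map<?, ?>) new Yaml().load(in);
        // The retired Thrift knobs should be gone after this change.
        Assert.assertFalse(yaml.containsKey("start_rpc"));
        Assert.assertFalse(yaml.containsKey("rpc_port"));
        Assert.assertFalse(yaml.containsKey("thrift_framed_transport_size_in_mb"));
    }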